From 953da07451fefb2ba4280ae21995a5d1236168cf Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Mon, 3 May 2021 11:12:20 -0700 Subject: [PATCH 1/6] Generate azure-servicefabric 8.0 --- .../azure/servicefabric/__init__.py | 20 +- .../azure/servicefabric/_configuration.py | 73 +- .../_service_fabric_client_ap_is.py | 88 - .../_service_fabric_client_apis.py | 123 + .../servicefabric/{version.py => _version.py} | 10 +- .../azure/servicefabric/aio/__init__.py | 10 + .../azure/servicefabric/aio/_configuration.py | 62 + .../aio/_service_fabric_client_apis.py | 116 + .../servicefabric/aio/operations/__init__.py | 31 + .../_mesh_application_operations.py | 329 + .../_mesh_code_package_operations.py | 114 + .../operations/_mesh_gateway_operations.py | 271 + .../operations/_mesh_network_operations.py | 276 + .../aio/operations/_mesh_secret_operations.py | 276 + .../_mesh_secret_value_operations.py | 360 + .../operations/_mesh_service_operations.py | 158 + .../_mesh_service_replica_operations.py | 166 + .../aio/operations/_mesh_volume_operations.py | 271 + .../_service_fabric_client_apis_operations.py | 16649 ++++++++++++ .../azure/servicefabric/models/__init__.py | 1272 +- .../azure/servicefabric/models/_models.py | 20062 +++++++------- .../azure/servicefabric/models/_models_py3.py | 21728 ++++++++++------ .../_service_fabric_client_ap_is_enums.py | 1079 - .../_service_fabric_client_apis_enums.py | 2092 ++ .../servicefabric/operations/__init__.py | 11 +- .../_mesh_application_operations.py | 369 +- .../_mesh_code_package_operations.py | 106 +- .../operations/_mesh_gateway_operations.py | 308 +- .../operations/_mesh_network_operations.py | 310 +- .../operations/_mesh_secret_operations.py | 303 +- .../_mesh_secret_value_operations.py | 402 +- .../operations/_mesh_service_operations.py | 169 +- .../_mesh_service_replica_operations.py | 167 +- .../operations/_mesh_volume_operations.py | 303 +- ..._service_fabric_client_ap_is_operations.py | 16433 ------------ 
.../_service_fabric_client_apis_operations.py | 16866 ++++++++++++ .../azure/servicefabric/py.typed | 1 + 37 files changed, 64643 insertions(+), 36741 deletions(-) delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py rename sdk/servicefabric/azure-servicefabric/azure/servicefabric/{version.py => _version.py} (84%) create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_application_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_code_package_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_gateway_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_network_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py create mode 
100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_service_fabric_client_apis_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py index 4fb9457e85d0..86d9a940f45c 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py @@ -1,19 +1,19 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._configuration import ServiceFabricClientAPIsConfiguration -from ._service_fabric_client_ap_is import ServiceFabricClientAPIs -__all__ = ['ServiceFabricClientAPIs', 'ServiceFabricClientAPIsConfiguration'] - -from .version import VERSION +from ._service_fabric_client_apis import ServiceFabricClientAPIs +from ._version import VERSION __version__ = VERSION +__all__ = ['ServiceFabricClientAPIs'] +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py index f742cea0cdb9..56bf252d290d 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py @@ -1,43 +1,66 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from msrest import Configuration +from typing import TYPE_CHECKING -from .version import VERSION +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.credentials import TokenCredential class ServiceFabricClientAPIsConfiguration(Configuration): - """Configuration for ServiceFabricClientAPIs + """Configuration for ServiceFabricClientAPIs. + Note that all parameters used to create this instance are saved as instance attributes. - :param credentials: Subscription credentials which uniquely identify - client subscription. - :type credentials: None - :param str base_url: Service URL + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential """ def __init__( - self, credentials, base_url=None): - - if credentials is None: - raise ValueError("Parameter 'credentials' must not be None.") - if not base_url: - base_url = 'http://localhost:19080' - - super(ServiceFabricClientAPIsConfiguration, self).__init__(base_url) - - # Starting Autorest.Python 4.0.64, make connection pool activated by default - self.keep_alive = True + self, + credential, # type: "TokenCredential" + **kwargs # type: Any + ): + # type: (...) 
-> None + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + super(ServiceFabricClientAPIsConfiguration, self).__init__(**kwargs) - self.add_user_agent('azure-servicefabric/{}'.format(VERSION)) + self.credential = credential + self.api_version = "8.0" + self.credential_scopes = kwargs.pop('credential_scopes', []) + kwargs.setdefault('sdk_moniker', 'servicefabric/{}'.format(VERSION)) + self._configure(**kwargs) - self.credentials = credentials + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) -> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if not self.credential_scopes and not self.authentication_policy: + raise ValueError("You must provide either credential_scopes or authentication_policy as kwargs") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py deleted file mode 100644 index babf96112b84..000000000000 --- 
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.service_client import SDKClient -from msrest import Serializer, Deserializer - -from ._configuration import ServiceFabricClientAPIsConfiguration -from .operations import ServiceFabricClientAPIsOperationsMixin -from .operations import MeshSecretOperations -from .operations import MeshSecretValueOperations -from .operations import MeshVolumeOperations -from .operations import MeshNetworkOperations -from .operations import MeshApplicationOperations -from .operations import MeshServiceOperations -from .operations import MeshCodePackageOperations -from .operations import MeshServiceReplicaOperations -from .operations import MeshGatewayOperations -from . import models - - -class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin, SDKClient): - """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. - - :ivar config: Configuration for client. 
- :vartype config: ServiceFabricClientAPIsConfiguration - - :ivar mesh_secret: MeshSecret operations - :vartype mesh_secret: azure.servicefabric.operations.MeshSecretOperations - :ivar mesh_secret_value: MeshSecretValue operations - :vartype mesh_secret_value: azure.servicefabric.operations.MeshSecretValueOperations - :ivar mesh_volume: MeshVolume operations - :vartype mesh_volume: azure.servicefabric.operations.MeshVolumeOperations - :ivar mesh_network: MeshNetwork operations - :vartype mesh_network: azure.servicefabric.operations.MeshNetworkOperations - :ivar mesh_application: MeshApplication operations - :vartype mesh_application: azure.servicefabric.operations.MeshApplicationOperations - :ivar mesh_service: MeshService operations - :vartype mesh_service: azure.servicefabric.operations.MeshServiceOperations - :ivar mesh_code_package: MeshCodePackage operations - :vartype mesh_code_package: azure.servicefabric.operations.MeshCodePackageOperations - :ivar mesh_service_replica: MeshServiceReplica operations - :vartype mesh_service_replica: azure.servicefabric.operations.MeshServiceReplicaOperations - :ivar mesh_gateway: MeshGateway operations - :vartype mesh_gateway: azure.servicefabric.operations.MeshGatewayOperations - - :param credentials: Subscription credentials which uniquely identify - client subscription. 
- :type credentials: None - :param str base_url: Service URL - """ - - def __init__( - self, credentials, base_url=None): - - self.config = ServiceFabricClientAPIsConfiguration(credentials, base_url) - super(ServiceFabricClientAPIs, self).__init__(self.config.credentials, self.config) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '7.2.0.46' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.mesh_secret = MeshSecretOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_secret_value = MeshSecretValueOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_volume = MeshVolumeOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_network = MeshNetworkOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_application = MeshApplicationOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_service = MeshServiceOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_code_package = MeshCodePackageOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_service_replica = MeshServiceReplicaOperations( - self._client, self.config, self._serialize, self._deserialize) - self.mesh_gateway = MeshGatewayOperations( - self._client, self.config, self._serialize, self._deserialize) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py new file mode 100644 index 000000000000..d0d6922f36f3 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core import PipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Optional + + from azure.core.credentials import TokenCredential + from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from ._configuration import ServiceFabricClientAPIsConfiguration +from .operations import ServiceFabricClientAPIsOperationsMixin +from .operations import MeshSecretOperations +from .operations import MeshSecretValueOperations +from .operations import MeshVolumeOperations +from .operations import MeshNetworkOperations +from .operations import MeshApplicationOperations +from .operations import MeshServiceOperations +from .operations import MeshCodePackageOperations +from .operations import MeshServiceReplicaOperations +from .operations import MeshGatewayOperations +from . import models + + +class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin): + """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. 
+ + :ivar mesh_secret: MeshSecretOperations operations + :vartype mesh_secret: azure.servicefabric.operations.MeshSecretOperations + :ivar mesh_secret_value: MeshSecretValueOperations operations + :vartype mesh_secret_value: azure.servicefabric.operations.MeshSecretValueOperations + :ivar mesh_volume: MeshVolumeOperations operations + :vartype mesh_volume: azure.servicefabric.operations.MeshVolumeOperations + :ivar mesh_network: MeshNetworkOperations operations + :vartype mesh_network: azure.servicefabric.operations.MeshNetworkOperations + :ivar mesh_application: MeshApplicationOperations operations + :vartype mesh_application: azure.servicefabric.operations.MeshApplicationOperations + :ivar mesh_service: MeshServiceOperations operations + :vartype mesh_service: azure.servicefabric.operations.MeshServiceOperations + :ivar mesh_code_package: MeshCodePackageOperations operations + :vartype mesh_code_package: azure.servicefabric.operations.MeshCodePackageOperations + :ivar mesh_service_replica: MeshServiceReplicaOperations operations + :vartype mesh_service_replica: azure.servicefabric.operations.MeshServiceReplicaOperations + :ivar mesh_gateway: MeshGatewayOperations operations + :vartype mesh_gateway: azure.servicefabric.operations.MeshGatewayOperations + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential + :param str base_url: Service URL + """ + + def __init__( + self, + credential, # type: "TokenCredential" + base_url=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) 
-> None + if not base_url: + base_url = 'http://localhost:19080/' + self._config = ServiceFabricClientAPIsConfiguration(credential, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + self.mesh_secret = MeshSecretOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_secret_value = MeshSecretValueOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_volume = MeshVolumeOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_network = MeshNetworkOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_application = MeshApplicationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_service = MeshServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_code_package = MeshCodePackageOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_service_replica = MeshServiceReplicaOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_gateway = MeshGatewayOperations( + self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, http_request, **kwargs): + # type: (HttpRequest, Any) -> HttpResponse + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.pipeline.transport.HttpResponse + """ + http_request.url = self._client.format_url(http_request.url) + stream = kwargs.pop("stream", True) + pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> ServiceFabricClientAPIs + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/version.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_version.py similarity index 84% rename from sdk/servicefabric/azure-servicefabric/azure/servicefabric/version.py rename to sdk/servicefabric/azure-servicefabric/azure/servicefabric/_version.py index 9ceb2dbc7a5c..c268602f0728 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/version.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_version.py @@ -1,13 +1,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "7.2.0.46" - +VERSION = "8.0.0.0" diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py new file mode 100644 index 000000000000..d91c5f9455ba --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_fabric_client_apis import ServiceFabricClientAPIs +__all__ = ['ServiceFabricClientAPIs'] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py new file mode 100644 index 000000000000..cbb3364c125c --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ServiceFabricClientAPIsConfiguration(Configuration): + """Configuration for ServiceFabricClientAPIs. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + super(ServiceFabricClientAPIsConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.api_version = "8.0" + self.credential_scopes = kwargs.pop('credential_scopes', []) + kwargs.setdefault('sdk_moniker', 'servicefabric/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or 
policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if not self.credential_scopes and not self.authentication_policy: + raise ValueError("You must provide either credential_scopes or authentication_policy as kwargs") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py new file mode 100644 index 000000000000..1614ad668b47 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.core import AsyncPipelineClient +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +from ._configuration import ServiceFabricClientAPIsConfiguration +from .operations import ServiceFabricClientAPIsOperationsMixin +from .operations import MeshSecretOperations +from .operations import MeshSecretValueOperations +from .operations import MeshVolumeOperations +from .operations import MeshNetworkOperations +from .operations import MeshApplicationOperations +from .operations import MeshServiceOperations +from .operations import MeshCodePackageOperations +from .operations import MeshServiceReplicaOperations +from .operations import MeshGatewayOperations +from .. import models + + +class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin): + """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. 
+ + :ivar mesh_secret: MeshSecretOperations operations + :vartype mesh_secret: azure.servicefabric.aio.operations.MeshSecretOperations + :ivar mesh_secret_value: MeshSecretValueOperations operations + :vartype mesh_secret_value: azure.servicefabric.aio.operations.MeshSecretValueOperations + :ivar mesh_volume: MeshVolumeOperations operations + :vartype mesh_volume: azure.servicefabric.aio.operations.MeshVolumeOperations + :ivar mesh_network: MeshNetworkOperations operations + :vartype mesh_network: azure.servicefabric.aio.operations.MeshNetworkOperations + :ivar mesh_application: MeshApplicationOperations operations + :vartype mesh_application: azure.servicefabric.aio.operations.MeshApplicationOperations + :ivar mesh_service: MeshServiceOperations operations + :vartype mesh_service: azure.servicefabric.aio.operations.MeshServiceOperations + :ivar mesh_code_package: MeshCodePackageOperations operations + :vartype mesh_code_package: azure.servicefabric.aio.operations.MeshCodePackageOperations + :ivar mesh_service_replica: MeshServiceReplicaOperations operations + :vartype mesh_service_replica: azure.servicefabric.aio.operations.MeshServiceReplicaOperations + :ivar mesh_gateway: MeshGatewayOperations operations + :vartype mesh_gateway: azure.servicefabric.aio.operations.MeshGatewayOperations + :param credential: Credential needed for the client to connect to Azure. 
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param str base_url: Service URL + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + base_url: Optional[str] = None, + **kwargs: Any + ) -> None: + if not base_url: + base_url = 'http://localhost:19080/' + self._config = ServiceFabricClientAPIsConfiguration(credential, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + self.mesh_secret = MeshSecretOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_secret_value = MeshSecretValueOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_volume = MeshVolumeOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_network = MeshNetworkOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_application = MeshApplicationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_service = MeshServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_code_package = MeshCodePackageOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_service_replica = MeshServiceReplicaOperations( + self._client, self._config, self._serialize, self._deserialize) + self.mesh_gateway = MeshGatewayOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. 
+ :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse + """ + http_request.url = self._client.format_url(http_request.url) + stream = kwargs.pop("stream", True) + pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "ServiceFabricClientAPIs": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py new file mode 100644 index 000000000000..df6b66c53161 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# --------------------------------------------------------------------------

# Async operation groups for the Service Fabric client. The mixin carries the
# cluster-level operations; the Mesh* classes cover the Mesh resource APIs.
from ._service_fabric_client_apis_operations import ServiceFabricClientAPIsOperationsMixin
from ._mesh_secret_operations import MeshSecretOperations
from ._mesh_secret_value_operations import MeshSecretValueOperations
from ._mesh_volume_operations import MeshVolumeOperations
from ._mesh_network_operations import MeshNetworkOperations
from ._mesh_application_operations import MeshApplicationOperations
from ._mesh_service_operations import MeshServiceOperations
from ._mesh_code_package_operations import MeshCodePackageOperations
from ._mesh_service_replica_operations import MeshServiceReplicaOperations
from ._mesh_gateway_operations import MeshGatewayOperations

# Public API of this package.
__all__ = [
    'ServiceFabricClientAPIsOperationsMixin',
    'MeshSecretOperations',
    'MeshSecretValueOperations',
    'MeshVolumeOperations',
    'MeshNetworkOperations',
    'MeshApplicationOperations',
    'MeshServiceOperations',
    'MeshCodePackageOperations',
    'MeshServiceReplicaOperations',
    'MeshGatewayOperations',
]
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest

from ... import models as _models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class MeshApplicationOperations:
    """MeshApplicationOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.servicefabric.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @staticmethod
    def _error_map(kwargs) -> Dict[int, Any]:
        # Shared status-code -> exception mapping; callers may extend or override
        # entries through the ``error_map`` keyword argument (popped here so it is
        # not forwarded to the pipeline).
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        return error_map

    def _fail(self, response, error_map) -> None:
        # Translate an unexpected HTTP response into HttpResponseError, attaching
        # the service-provided FabricError body when it can be deserialized.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
        raise HttpResponseError(response=response, model=error)

    async def create_or_update(
        self,
        application_resource_name: str,
        application_resource_description: "_models.ApplicationResourceDescription",
        **kwargs
    ) -> Optional["_models.ApplicationResourceDescription"]:
        """Creates or updates an Application resource.

        Creates an Application resource with the specified name, description and properties. If an
        Application resource with the same name exists, then it is updated with the specified
        description and properties.

        :param application_resource_name: The identity of the application.
        :type application_resource_name: str
        :param application_resource_description: Description for creating an Application resource.
        :type application_resource_description: ~azure.servicefabric.models.ApplicationResourceDescription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationResourceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ApplicationResourceDescription"]]
        error_map = self._error_map(kwargs)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content_kwargs['content'] = self._serialize.body(application_resource_description, 'ApplicationResourceDescription')
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 signals an accepted asynchronous update and carries no body.
        if response.status_code not in [200, 201, 202]:
            self._fail(response, error_map)

        deserialized = None
        if response.status_code in (200, 201):
            deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/Resources/Applications/{applicationResourceName}'}  # type: ignore

    async def get(
        self,
        application_resource_name: str,
        **kwargs
    ) -> "_models.ApplicationResourceDescription":
        """Gets the Application resource with the given name.

        Gets the information about the Application resource with the given name. The information
        includes the description and other properties of the Application.

        :param application_resource_name: The identity of the application.
        :type application_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationResourceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ApplicationResourceDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationResourceDescription"]
        error_map = self._error_map(kwargs)
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            self._fail(response, error_map)

        deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/Resources/Applications/{applicationResourceName}'}  # type: ignore

    async def delete(
        self,
        application_resource_name: str,
        **kwargs
    ) -> None:
        """Deletes the Application resource.

        Deletes the Application resource identified by the name.

        :param application_resource_name: The identity of the application.
        :type application_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = self._error_map(kwargs)
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = asynchronous delete accepted; 204 = resource already gone.
        if response.status_code not in [200, 202, 204]:
            self._fail(response, error_map)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/Resources/Applications/{applicationResourceName}'}  # type: ignore

    async def list(
        self,
        **kwargs
    ) -> "_models.PagedApplicationResourceDescriptionList":
        """Lists all the application resources.

        Gets the information about all application resources in a given resource group. The
        information includes the description and other properties of the Application.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedApplicationResourceDescriptionList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedApplicationResourceDescriptionList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedApplicationResourceDescriptionList"]
        error_map = self._error_map(kwargs)
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (no path parameters on the collection endpoint)
        url = self.list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            self._fail(response, error_map)

        deserialized = self._deserialize('PagedApplicationResourceDescriptionList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/Resources/Applications'}  # type: ignore

    async def get_upgrade_progress(
        self,
        application_resource_name: str,
        **kwargs
    ) -> "_models.ApplicationResourceUpgradeProgressInfo":
        """Gets the progress of the latest upgrade performed on this application resource.

        Gets the upgrade progress information about the Application resource with the given name.
        The information includes the percentage of completion and other upgrade state information
        of the Application resource.

        :param application_resource_name: The identity of the application.
        :type application_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationResourceUpgradeProgressInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ApplicationResourceUpgradeProgressInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationResourceUpgradeProgressInfo"]
        error_map = self._error_map(kwargs)
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_upgrade_progress.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            self._fail(response, error_map)

        deserialized = self._deserialize('ApplicationResourceUpgradeProgressInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_upgrade_progress.metadata = {'url': '/Resources/Applications/{applicationResourceName}/$/GetUpgradeProgress'}  # type: ignore
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest

from ... import models as _models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class MeshCodePackageOperations:
    """MeshCodePackageOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.servicefabric.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get_container_logs(
        self,
        application_resource_name: str,
        service_resource_name: str,
        replica_name: str,
        code_package_name: str,
        tail: Optional[str] = None,
        **kwargs
    ) -> "_models.ContainerLogs":
        """Gets the logs from the container.

        Gets the logs for the container of the specified code package of the service replica.

        :param application_resource_name: The identity of the application.
        :type application_resource_name: str
        :param service_resource_name: The identity of the service.
        :type service_resource_name: str
        :param replica_name: Service Fabric replica name.
        :type replica_name: str
        :param code_package_name: The name of code package of the service.
        :type code_package_name: str
        :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show
         the complete logs.
        :type tail: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ContainerLogs, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ContainerLogs
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerLogs"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Fill the URL template and expand it against the client's base address.
        path_format_arguments = {
            'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True),
            'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True),
            'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True),
            'codePackageName': self._serialize.url("code_package_name", code_package_name, 'str'),
        }
        url = self._client.format_url(self.get_container_logs.metadata['url'], **path_format_arguments)  # type: ignore

        # Query string: api-version always, Tail only when the caller supplied one.
        query_parameters = {
            'api-version': self._serialize.query("api_version", api_version, 'str'),
        }  # type: Dict[str, Any]
        if tail is not None:
            query_parameters['Tail'] = self._serialize.query("tail", tail, 'str')

        header_parameters = {
            'Accept': self._serialize.header("accept", accept, 'str'),
        }  # type: Dict[str, Any]

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ContainerLogs', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_container_logs.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}/CodePackages/{codePackageName}/Logs'}  # type: ignore
Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create_or_update( + self, + gateway_resource_name: str, + gateway_resource_description: "_models.GatewayResourceDescription", + **kwargs + ) -> Optional["_models.GatewayResourceDescription"]: + """Creates or updates a Gateway resource. + + Creates a Gateway resource with the specified name, description and properties. If Gateway + resource with the same name exists, then it is updated with the specified description and + properties. Use Gateway resource to provide public connectivity to application services. + + :param gateway_resource_name: The identity of the gateway. + :type gateway_resource_name: str + :param gateway_resource_description: Description for creating a Gateway resource. 
+ :type gateway_resource_description: ~azure.servicefabric.models.GatewayResourceDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: GatewayResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.GatewayResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GatewayResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_or_update.metadata['url'] # type: ignore + path_format_arguments = { + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(gateway_resource_description, 'GatewayResourceDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore + + async def get( + self, + gateway_resource_name: str, + **kwargs + ) -> "_models.GatewayResourceDescription": + """Gets the Gateway resource with the given name. + + Gets the information about the Gateway resource with the given name. The information include + the description and other properties of the Gateway. + + :param gateway_resource_name: The identity of the gateway. 
+ :type gateway_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: GatewayResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.GatewayResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewayResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore + + async def delete( + self, + gateway_resource_name: str, 
+ **kwargs + ) -> None: + """Deletes the Gateway resource. + + Deletes the Gateway resource identified by the name. + + :param gateway_resource_name: The identity of the gateway. + :type gateway_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore + + async def list( + self, + **kwargs + ) -> 
"_models.PagedGatewayResourceDescriptionList": + """Lists all the gateway resources. + + Gets the information about all gateway resources in a given resource group. The information + include the description and other properties of the Gateway. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedGatewayResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedGatewayResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedGatewayResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedGatewayResourceDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/Resources/Gateways'} # type: ignore diff --git 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest

from ... import models as _models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class MeshNetworkOperations:
    """Async operations over Service Fabric mesh Network resources.

    Do not instantiate this class directly; create a client instance and use
    the operation group it attaches as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.servicefabric.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._config = config
        self._serialize = serializer
        self._deserialize = deserializer

    async def create_or_update(
        self,
        network_resource_name: str,
        name: str,
        properties: "_models.NetworkResourceProperties",
        **kwargs
    ) -> Optional["_models.NetworkResourceDescription"]:
        """Creates or updates a Network resource.

        Creates a Network resource with the given name, description and properties; if a
        Network resource with the same name already exists it is updated instead. Network
        resources provide connectivity between application services.

        :param network_resource_name: The identity of the network.
        :type network_resource_name: str
        :param name: Name of the Network resource.
        :type name: str
        :param properties: Describes properties of a network resource.
        :type properties: ~azure.servicefabric.models.NetworkResourceProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkResourceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.NetworkResourceDescription or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.NetworkResourceDescription"]]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened parameters are folded back into the wire model.
        description = _models.NetworkResourceDescription(name=name, properties=properties)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Expand the URL template recorded on the operation's metadata.
        url = self._client.format_url(
            self.create_or_update.metadata['url'],  # type: ignore
            networkResourceName=self._serialize.url(
                "network_resource_name", network_resource_name, 'str', skip_quote=True),
        )

        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {
            'Content-Type': self._serialize.header("content_type", content_type, 'str'),
            'Accept': self._serialize.header("accept", accept, 'str'),
        }  # type: Dict[str, Any]

        body = self._serialize.body(description, 'NetworkResourceDescription')
        request = self._client.put(url, query, headers, content=body)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200, 201, 202):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # 202 (accepted, still provisioning) carries no body to deserialize.
        deserialized = None
        if response.status_code in (200, 201):
            deserialized = self._deserialize('NetworkResourceDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/Resources/Networks/{networkResourceName}'}  # type: ignore

    async def get(
        self,
        network_resource_name: str,
        **kwargs
    ) -> "_models.NetworkResourceDescription":
        """Gets the Network resource with the given name.

        Gets the information about the Network resource with the given name. The information
        includes the description and other properties of the Network.

        :param network_resource_name: The identity of the network.
        :type network_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkResourceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.NetworkResourceDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkResourceDescription"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self._client.format_url(
            self.get.metadata['url'],  # type: ignore
            networkResourceName=self._serialize.url(
                "network_resource_name", network_resource_name, 'str', skip_quote=True),
        )
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.get(url, query, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('NetworkResourceDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/Resources/Networks/{networkResourceName}'}  # type: ignore

    async def delete(
        self,
        network_resource_name: str,
        **kwargs
    ) -> None:
        """Deletes the Network resource.

        Deletes the Network resource identified by the name.

        :param network_resource_name: The identity of the network.
        :type network_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self._client.format_url(
            self.delete.metadata['url'],  # type: ignore
            networkResourceName=self._serialize.url(
                "network_resource_name", network_resource_name, 'str', skip_quote=True),
        )
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.delete(url, query, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = deletion accepted, 204 = resource already gone; both are success.
        if response.status_code not in (200, 202, 204):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/Resources/Networks/{networkResourceName}'}  # type: ignore

    async def list(
        self,
        **kwargs
    ) -> "_models.PagedNetworkResourceDescriptionList":
        """Lists all the network resources.

        Gets the information about all network resources in a given resource group. The
        information includes the description and other properties of each Network.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedNetworkResourceDescriptionList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedNetworkResourceDescriptionList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedNetworkResourceDescriptionList"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self.list.metadata['url']  # type: ignore
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.get(url, query, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedNetworkResourceDescriptionList', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/Resources/Networks'}  # type: ignore
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest

from ... import models as _models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class MeshSecretOperations:
    """Async operations over Service Fabric mesh Secret resources.

    Do not instantiate this class directly; create a client instance and use
    the operation group it attaches as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.servicefabric.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._config = config
        self._serialize = serializer
        self._deserialize = deserializer

    async def create_or_update(
        self,
        secret_resource_name: str,
        properties: "_models.SecretResourceProperties",
        name: str,
        **kwargs
    ) -> Optional["_models.SecretResourceDescription"]:
        """Creates or updates a Secret resource.

        Creates a Secret resource with the given name, description and properties; if a
        Secret resource with the same name already exists it is updated instead. Once
        created, the kind and contentType of a secret resource cannot be updated.

        :param secret_resource_name: The name of the secret resource.
        :type secret_resource_name: str
        :param properties: Describes the properties of a secret resource.
        :type properties: ~azure.servicefabric.models.SecretResourceProperties
        :param name: Name of the Secret resource.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecretResourceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.SecretResourceDescription or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.SecretResourceDescription"]]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened parameters are folded back into the wire model.
        description = _models.SecretResourceDescription(properties=properties, name=name)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Expand the URL template recorded on the operation's metadata.
        url = self._client.format_url(
            self.create_or_update.metadata['url'],  # type: ignore
            secretResourceName=self._serialize.url(
                "secret_resource_name", secret_resource_name, 'str', skip_quote=True),
        )

        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {
            'Content-Type': self._serialize.header("content_type", content_type, 'str'),
            'Accept': self._serialize.header("accept", accept, 'str'),
        }  # type: Dict[str, Any]

        body = self._serialize.body(description, 'SecretResourceDescription')
        request = self._client.put(url, query, headers, content=body)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200, 201, 202):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # 202 (accepted, still provisioning) carries no body to deserialize.
        deserialized = None
        if response.status_code in (200, 201):
            deserialized = self._deserialize('SecretResourceDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/Resources/Secrets/{secretResourceName}'}  # type: ignore

    async def get(
        self,
        secret_resource_name: str,
        **kwargs
    ) -> "_models.SecretResourceDescription":
        """Gets the Secret resource with the given name.

        Gets the information about the Secret resource with the given name. The information
        includes the description and other properties of the Secret.

        :param secret_resource_name: The name of the secret resource.
        :type secret_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecretResourceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.SecretResourceDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretResourceDescription"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self._client.format_url(
            self.get.metadata['url'],  # type: ignore
            secretResourceName=self._serialize.url(
                "secret_resource_name", secret_resource_name, 'str', skip_quote=True),
        )
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.get(url, query, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('SecretResourceDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/Resources/Secrets/{secretResourceName}'}  # type: ignore

    async def delete(
        self,
        secret_resource_name: str,
        **kwargs
    ) -> None:
        """Deletes the Secret resource.

        Deletes the specified Secret resource and all of its named values.

        :param secret_resource_name: The name of the secret resource.
        :type secret_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self._client.format_url(
            self.delete.metadata['url'],  # type: ignore
            secretResourceName=self._serialize.url(
                "secret_resource_name", secret_resource_name, 'str', skip_quote=True),
        )
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.delete(url, query, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = deletion accepted, 204 = resource already gone; both are success.
        if response.status_code not in (200, 202, 204):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}'}  # type: ignore

    async def list(
        self,
        **kwargs
    ) -> "_models.PagedSecretResourceDescriptionList":
        """Lists all the secret resources.

        Gets the information about all secret resources in a given resource group. The
        information includes the description and other properties of each Secret.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedSecretResourceDescriptionList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedSecretResourceDescriptionList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedSecretResourceDescriptionList"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self.list.metadata['url']  # type: ignore
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.get(url, query, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedSecretResourceDescriptionList', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/Resources/Secrets'}  # type: ignore
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py new file mode 100644 index 000000000000..1d8a0306c6fe --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py @@ -0,0 +1,360 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class MeshSecretValueOperations: + """MeshSecretValueOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def add_value( + self, + secret_resource_name: str, + secret_value_resource_name: str, + name: str, + value: Optional[str] = None, + **kwargs + ) -> Optional["_models.SecretValueResourceDescription"]: + """Adds the specified value as a new version of the specified secret resource. + + Creates a new value of the specified secret resource. The name of the value is typically the + version identifier. Once created the value cannot be changed. + + :param secret_resource_name: The name of the secret resource. + :type secret_resource_name: str + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. + :type secret_value_resource_name: str + :param name: Version identifier of the secret value. + :type name: str + :param value: The actual value of the secret. 
+ :type value: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretValueResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretValueResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _secret_value_resource_description = _models.SecretValueResourceDescription(name=name, value=value) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.add_value.metadata['url'] # type: ignore + path_format_arguments = { + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_secret_value_resource_description, 'SecretValueResourceDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + add_value.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore + + async def get( + self, + secret_resource_name: str, + secret_value_resource_name: str, + **kwargs + ) -> "_models.SecretValueResourceDescription": + """Gets the specified secret value resource. + + Get the information about the specified named secret value resources. The information does not + include the actual value of the secret. + + :param secret_resource_name: The name of the secret resource. + :type secret_resource_name: str + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. 
+ :type secret_value_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretValueResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretValueResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValueResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return 
deserialized + get.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore + + async def delete( + self, + secret_resource_name: str, + secret_value_resource_name: str, + **kwargs + ) -> None: + """Deletes the specified value of the named secret resource. + + Deletes the secret value resource identified by the name. The name of the resource is typically + the version associated with that value. Deletion will fail if the specified value is in use. + + :param secret_resource_name: The name of the secret resource. + :type secret_resource_name: str + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. + :type secret_value_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore + + async def list( + self, + secret_resource_name: str, + **kwargs + ) -> "_models.PagedSecretValueResourceDescriptionList": + """List names of all values of the specified secret resource. + + Gets information about all secret value resources of the specified secret resource. The + information includes the names of the secret value resources, but not the actual values. + + :param secret_resource_name: The name of the secret resource. 
+ :type secret_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedSecretValueResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedSecretValueResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretValueResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedSecretValueResourceDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values'} # type: ignore + + 
async def show( + self, + secret_resource_name: str, + secret_value_resource_name: str, + **kwargs + ) -> "_models.SecretValue": + """Lists the specified value of the secret resource. + + Lists the decrypted value of the specified named value of the secret resource. This is a + privileged operation. + + :param secret_resource_name: The name of the secret resource. + :type secret_resource_name: str + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. + :type secret_value_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretValue, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretValue + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValue"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.show.metadata['url'] # type: ignore + path_format_arguments = { + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('SecretValue', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + show.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}/list_value'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py new file mode 100644 index 000000000000..963b70b09c2d --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py @@ -0,0 +1,158 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... 
import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class MeshServiceOperations: + """MeshServiceOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def get( + self, + application_resource_name: str, + service_resource_name: str, + **kwargs + ) -> "_models.ServiceResourceDescription": + """Gets the Service resource with the given name. + + Gets the information about the Service resource with the given name. The information include + the description and other properties of the Service. + + :param application_resource_name: The identity of the application. + :type application_resource_name: str + :param service_resource_name: The identity of the service. 
+ :type service_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata 
= {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}'} # type: ignore + + async def list( + self, + application_resource_name: str, + **kwargs + ) -> "_models.PagedServiceResourceDescriptionList": + """Lists all the service resources. + + Gets the information about all services of an application resource. The information include the + description and other properties of the Service. + + :param application_resource_name: The identity of the application. + :type application_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServiceResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServiceResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: 
+ map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedServiceResourceDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py new file mode 100644 index 000000000000..0e7ef70ae4d7 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py @@ -0,0 +1,166 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... 
import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class MeshServiceReplicaOperations: + """MeshServiceReplicaOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def get( + self, + application_resource_name: str, + service_resource_name: str, + replica_name: str, + **kwargs + ) -> "_models.ServiceReplicaDescription": + """Gets the given replica of the service of an application. + + Gets the information about the service replica with the given name. The information include the + description and other properties of the service replica. + + :param application_resource_name: The identity of the application. + :type application_resource_name: str + :param service_resource_name: The identity of the service. + :type service_resource_name: str + :param replica_name: Service Fabric replica name. 
+ :type replica_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceReplicaDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceReplicaDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceReplicaDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), + 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceReplicaDescription', pipeline_response) + + if cls: + return 
cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}'} # type: ignore + + async def list( + self, + application_resource_name: str, + service_resource_name: str, + **kwargs + ) -> "_models.PagedServiceReplicaDescriptionList": + """Lists all the replicas of a service. + + Gets the information about all replicas of a service. The information include the description + and other properties of the service replica. + + :param application_resource_name: The identity of the application. + :type application_resource_name: str + :param service_resource_name: The identity of the service. + :type service_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServiceReplicaDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServiceReplicaDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceReplicaDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: 
Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedServiceReplicaDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py new file mode 100644 index 000000000000..e7e37d3ee33f --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py @@ -0,0 +1,271 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class MeshVolumeOperations: + """MeshVolumeOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create_or_update( + self, + volume_resource_name: str, + volume_resource_description: "_models.VolumeResourceDescription", + **kwargs + ) -> Optional["_models.VolumeResourceDescription"]: + """Creates or updates a Volume resource. + + Creates a Volume resource with the specified name, description and properties. If Volume + resource with the same name exists, then it is updated with the specified description and + properties. + + :param volume_resource_name: The identity of the volume. + :type volume_resource_name: str + :param volume_resource_description: Description for creating a Volume resource. 
+ :type volume_resource_description: ~azure.servicefabric.models.VolumeResourceDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: VolumeResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.VolumeResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VolumeResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_or_update.metadata['url'] # type: ignore + path_format_arguments = { + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(volume_resource_description, 'VolumeResourceDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore + + async def get( + self, + volume_resource_name: str, + **kwargs + ) -> "_models.VolumeResourceDescription": + """Gets the Volume resource with the given name. + + Gets the information about the Volume resource with the given name. The information include the + description and other properties of the Volume. + + :param volume_resource_name: The identity of the volume. + :type volume_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: VolumeResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.VolumeResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.VolumeResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = 
self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore + + async def delete( + self, + volume_resource_name: str, + **kwargs + ) -> None: + """Deletes the Volume resource. + + Deletes the Volume resource identified by the name. + + :param volume_resource_name: The identity of the volume. 
+ :type volume_resource_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore + + async def list( + self, + **kwargs + ) -> "_models.PagedVolumeResourceDescriptionList": + """Lists all the volume resources. + + Gets the information about all volume resources in a given resource group. 
The information + include the description and other properties of the Volume. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedVolumeResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedVolumeResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedVolumeResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedVolumeResourceDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/Resources/Volumes'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_service_fabric_client_apis_operations.py 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest

from ... import models as _models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class ServiceFabricClientAPIsOperationsMixin:

    async def get_cluster_manifest(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ClusterManifest":
        """Get the Service Fabric cluster manifest.

        The cluster manifest describes cluster-wide properties: the node
        types on the cluster, security configuration, and fault and upgrade
        domain topologies, among others. For stand-alone clusters these come
        from ClusterConfig.JSON; in other deployment scenarios (e.g. Azure
        portal) most of the manifest is generated internally by Service
        Fabric during deployment. The contents are informational only and
        callers should not take a dependency on the file format.

        :param timeout: The server timeout for performing the operation in
         seconds. Specifies how long the client is willing to wait for the
         operation to complete. Defaults to 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterManifest, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterManifest
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterManifest"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self.get_cluster_manifest.metadata['url']  # type: ignore

        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        if timeout is not None:
            # The service bounds the timeout to [1, 4294967295] seconds.
            params['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.get(url, params, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterManifest', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'}  # type: ignore

    async def get_cluster_health(
        self,
        nodes_health_state_filter: Optional[int] = 0,
        applications_health_state_filter: Optional[int] = 0,
        events_health_state_filter: Optional[int] = 0,
        exclude_health_statistics: Optional[bool] = False,
        include_system_application_health_statistics: Optional[bool] = False,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ClusterHealth":
        """Gets the health of a Service Fabric cluster.

        Use EventsHealthStateFilter to filter the collection of health events
        reported on the cluster based on the health state. Similarly, use
        NodesHealthStateFilter and ApplicationsHealthStateFilter to filter
        the collection of nodes and applications returned based on their
        aggregated health state.

        Each filter is a flag-based HealthStateFilter combination obtained
        with bitwise 'OR': Default=0 (matches any state), None=1, Ok=2,
        Warning=4, Error=8, All=65535. For example, 6 returns entries whose
        HealthState is Ok (2) or Warning (4). Filtered-out entries are still
        used to evaluate the aggregated health state.

        :param nodes_health_state_filter: HealthStateFilter combination
         selecting which node health state objects are returned.
        :type nodes_health_state_filter: int
        :param applications_health_state_filter: HealthStateFilter
         combination selecting which application health state objects are
         returned.
        :type applications_health_state_filter: int
        :param events_health_state_filter: HealthStateFilter combination
         selecting which HealthEvent objects are returned.
        :type events_health_state_filter: int
        :param exclude_health_statistics: Whether to omit the health
         statistics (counts of children in Ok, Warning and Error) from the
         result. False by default.
        :type exclude_health_statistics: bool
        :param include_system_application_health_statistics: Whether the
         health statistics should also cover the fabric:/System application.
         False by default; only applied when statistics are included.
        :type include_system_application_health_statistics: bool
        :param timeout: The server timeout for performing the operation in
         seconds. Defaults to 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterHealth"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        url = self.get_cluster_health.metadata['url']  # type: ignore

        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        if nodes_health_state_filter is not None:
            params['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int')
        if applications_health_state_filter is not None:
            params['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int')
        if events_health_state_filter is not None:
            params['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if exclude_health_statistics is not None:
            params['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
        if include_system_application_health_statistics is not None:
            params['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool')
        if timeout is not None:
            params['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

        request = self._client.get(url, params, headers)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterHealth', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_cluster_health.metadata = {'url': '/$/GetClusterHealth'}  # type: ignore

    async def get_cluster_health_using_policy(
        self,
        nodes_health_state_filter: Optional[int] = 0,
        applications_health_state_filter: Optional[int] = 0,
        events_health_state_filter: Optional[int] = 0,
        exclude_health_statistics: Optional[bool] = False,
        include_system_application_health_statistics: Optional[bool] = False,
        timeout: Optional[int] = 60,
        application_health_policy_map: Optional[List["_models.ApplicationHealthPolicyMapItem"]] = None,
        cluster_health_policy: Optional["_models.ClusterHealthPolicy"] = None,
        **kwargs
    ) -> "_models.ClusterHealth":
        """Gets the health of a Service Fabric cluster using the specified policy.

        Behaves like :meth:`get_cluster_health`, with the same
        flag-based HealthStateFilter semantics (Default=0, None=1, Ok=2,
        Warning=4, Error=8, All=65535, combined with bitwise 'OR'), and
        additionally accepts ClusterHealthPolicies to override the health
        policies used to evaluate the health.

        :param nodes_health_state_filter: HealthStateFilter combination
         selecting which node health state objects are returned.
        :type nodes_health_state_filter: int
        :param applications_health_state_filter: HealthStateFilter
         combination selecting which application health state objects are
         returned.
        :type applications_health_state_filter: int
        :param events_health_state_filter: HealthStateFilter combination
         selecting which HealthEvent objects are returned.
        :type events_health_state_filter: int
        :param exclude_health_statistics: Whether to omit the health
         statistics from the result. False by default.
        :type exclude_health_statistics: bool
        :param include_system_application_health_statistics: Whether the
         health statistics should also cover the fabric:/System application.
         False by default; only applied when statistics are included.
        :type include_system_application_health_statistics: bool
        :param timeout: The server timeout for performing the operation in
         seconds. Defaults to 60 seconds.
        :type timeout: long
        :param application_health_policy_map: Map of application name to the
         ApplicationHealthPolicy used to evaluate that application's health.
         Applications not in the map use the policy from their application
         manifest or the default policy. Empty by default.
        :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem]
        :param cluster_health_policy: Health policy used to evaluate the
         health of the cluster or of a cluster node.
        :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterHealth"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Both policy arguments travel in a single ClusterHealthPolicies body.
        _policies = _models.ClusterHealthPolicies(
            application_health_policy_map=application_health_policy_map,
            cluster_health_policy=cluster_health_policy)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        url = self.get_cluster_health_using_policy.metadata['url']  # type: ignore

        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        if nodes_health_state_filter is not None:
            params['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int')
        if applications_health_state_filter is not None:
            params['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int')
        if events_health_state_filter is not None:
            params['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if exclude_health_statistics is not None:
            params['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
        if include_system_application_health_statistics is not None:
            params['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool')
        if timeout is not None:
            params['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        headers = {}  # type: Dict[str, Any]
        headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        headers['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        if _policies is not None:
            body_content_kwargs['content'] = self._serialize.body(_policies, 'ClusterHealthPolicies')
        else:
            body_content_kwargs['content'] = None
        request = self._client.post(url, params, headers, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in (200,):
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterHealth', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'}  # type: ignore
+ + async def get_cluster_health_chunk( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ClusterHealthChunk": + """Gets the health of a Service Fabric cluster using health chunks. + + Gets the health of a Service Fabric cluster using health chunks. Includes the aggregated health + state of the cluster, but none of the cluster entities. + To expand the cluster health and get the health state of all or some of the entities, use the + POST URI and specify the cluster health chunk query description. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterHealthChunk, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterHealthChunk + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_health_chunk.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore + + async def get_cluster_health_chunk_using_policy_and_advanced_filters( + self, + timeout: Optional[int] = 60, + cluster_health_chunk_query_description: Optional["_models.ClusterHealthChunkQueryDescription"] = None, + **kwargs + ) -> "_models.ClusterHealthChunk": + """Gets the health of a Service Fabric cluster using health chunks. + + Gets the health of a Service Fabric cluster using health chunks. The health evaluation is done + based on the input cluster health chunk query description. + The query description allows users to specify health policies for evaluating the cluster and + its children. + Users can specify very flexible filters to select which cluster entities to return. The + selection can be done based on the entities health state and based on the hierarchy. + The query can return multi-level children of the entities based on the specified filters. For + example, it can return one application with a specified name, and for this application, return + only services that are in Error or Warning, and all partitions and replicas for one of these + services. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param cluster_health_chunk_query_description: Describes the cluster and application health + policies used to evaluate the cluster health and the filters to select which cluster entities + to be returned. + If the cluster health policy is present, it is used to evaluate the cluster events and the + cluster nodes. If not present, the health evaluation uses the cluster health policy defined in + the cluster manifest or the default cluster health policy. + By default, each application is evaluated using its specific application health policy, + defined in the application manifest, or the default health policy, if no policy is defined in + manifest. + If the application health policy map is specified, and it has an entry for an application, the + specified application health policy + is used to evaluate the application health. + Users can specify very flexible filters to select which cluster entities to include in + response. The selection can be done based on the entities health state and based on the + hierarchy. + The query can return multi-level children of the entities based on the specified filters. For + example, it can return one application with a specified name, and for this application, return + only services that are in Error or Warning, and all partitions and replicas for one of these + services. 
+ :type cluster_health_chunk_query_description: ~azure.servicefabric.models.ClusterHealthChunkQueryDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterHealthChunk, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterHealthChunk + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cluster_health_chunk_query_description is not None: + body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore + + async def report_cluster_health( + self, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric cluster. + + Sends a health report on a Service Fabric cluster. The report must contain the information + about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway node, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetClusterHealth and check that + the report appears in the HealthEvents section. + + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. 
+ If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_cluster_health.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if 
cls: + return cls(pipeline_response, None, {}) + + report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} # type: ignore + + async def get_provisioned_fabric_code_version_info_list( + self, + code_version: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.FabricCodeVersionInfo"]: + """Gets a list of fabric code versions that are provisioned in a Service Fabric cluster. + + Gets a list of information about fabric code versions that are provisioned in the cluster. The + parameter CodeVersion can be used to optionally filter the output to only that particular + version. + + :param code_version: The product version of Service Fabric. + :type code_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of FabricCodeVersionInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricCodeVersionInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if code_version is not None: + query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[FabricCodeVersionInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} # type: ignore + + async def get_provisioned_fabric_config_version_info_list( + self, + config_version: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.FabricConfigVersionInfo"]: + """Gets a list of fabric config versions that are provisioned in a Service Fabric cluster. + + Gets a list of information about fabric config versions that are provisioned in the cluster. + The parameter ConfigVersion can be used to optionally filter the output to only that particular + version. + + :param config_version: The config version of Service Fabric. + :type config_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of FabricConfigVersionInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricConfigVersionInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if config_version is not None: + query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[FabricConfigVersionInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} # type: ignore + + async def get_cluster_upgrade_progress( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ClusterUpgradeProgressObject": + """Gets the progress of the current cluster upgrade. + + Gets the current progress of the ongoing cluster upgrade. If no upgrade is currently in + progress, get the last state of the previous cluster upgrade. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterUpgradeProgressObject, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterUpgradeProgressObject"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_upgrade_progress.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterUpgradeProgressObject', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_upgrade_progress.metadata = {'url': '/$/GetUpgradeProgress'} # type: ignore + + async def get_cluster_configuration( + self, + configuration_api_version: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ClusterConfiguration": + """Get the Service Fabric standalone cluster configuration. + + The cluster configuration contains properties of the cluster that include different node types + on the cluster, + security configurations, fault, and upgrade domain topologies, etc. + + :param configuration_api_version: The API version of the Standalone cluster json configuration. + :type configuration_api_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterConfiguration, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterConfiguration + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfiguration"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_configuration.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterConfiguration', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'} # type: ignore + + 
async def get_cluster_configuration_upgrade_status( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ClusterConfigurationUpgradeStatusInfo": + """Get the cluster configuration upgrade status of a Service Fabric standalone cluster. + + Get the cluster configuration upgrade status details of a Service Fabric standalone cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterConfigurationUpgradeStatusInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfigurationUpgradeStatusInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_configuration_upgrade_status.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response 
+ + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'} # type: ignore + + async def get_upgrade_orchestration_service_state( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.UpgradeOrchestrationServiceState": + """Get the service state of Service Fabric Upgrade Orchestration Service. + + Get the service state of Service Fabric Upgrade Orchestration Service. This API is internally + used for support purposes. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UpgradeOrchestrationServiceState, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UpgradeOrchestrationServiceState"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_upgrade_orchestration_service_state.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UpgradeOrchestrationServiceState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'} # type: ignore + + async def 
set_upgrade_orchestration_service_state( + self, + timeout: Optional[int] = 60, + service_state: Optional[str] = None, + **kwargs + ) -> "_models.UpgradeOrchestrationServiceStateSummary": + """Update the service state of Service Fabric Upgrade Orchestration Service. + + Update the service state of Service Fabric Upgrade Orchestration Service. This API is + internally used for support purposes. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param service_state: The state of Service Fabric Upgrade Orchestration Service. + :type service_state: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UpgradeOrchestrationServiceStateSummary, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UpgradeOrchestrationServiceStateSummary"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _upgrade_orchestration_service_state = _models.UpgradeOrchestrationServiceState(service_state=service_state) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.set_upgrade_orchestration_service_state.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # 
Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'} # type: ignore + + async def provision_cluster( + self, + timeout: Optional[int] = 60, + code_file_path: Optional[str] = None, + cluster_manifest_file_path: Optional[str] = None, + **kwargs + ) -> None: + """Provision the code or configuration packages of a Service Fabric cluster. + + Validate and provision the code or configuration packages of a Service Fabric cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param code_file_path: The cluster code package file path. 
+ :type code_file_path: str + :param cluster_manifest_file_path: The cluster manifest file path. + :type cluster_manifest_file_path: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _provision_fabric_description = _models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.provision_cluster.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_provision_fabric_description, 'ProvisionFabricDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + provision_cluster.metadata = {'url': '/$/Provision'} # type: ignore + + async def unprovision_cluster( + self, + timeout: Optional[int] = 60, + code_version: Optional[str] = None, + config_version: Optional[str] = None, + **kwargs + ) -> None: + """Unprovision the code or configuration packages of a Service Fabric cluster. + + It is supported to unprovision code and configuration separately. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param code_version: The cluster code package version. + :type code_version: str + :param config_version: The cluster manifest version. 
+ :type config_version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _unprovision_fabric_description = _models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.unprovision_cluster.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_unprovision_fabric_description, 'UnprovisionFabricDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + unprovision_cluster.metadata = {'url': '/$/Unprovision'} # type: ignore + + async def rollback_cluster_upgrade( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Roll back the upgrade of a Service Fabric cluster. + + Roll back the code or configuration upgrade of a Service Fabric cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.rollback_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'} # type: ignore + + async def resume_cluster_upgrade( + self, + upgrade_domain: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Make the cluster upgrade move on to the next upgrade domain. + + Make the cluster code or configuration upgrade move on to the next upgrade domain if + appropriate. + + :param upgrade_domain: The next upgrade domain for this cluster upgrade. + :type upgrade_domain: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _resume_cluster_upgrade_description = _models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.resume_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'} # type: ignore + + async def start_cluster_upgrade( + self, + start_cluster_upgrade_description: "_models.StartClusterUpgradeDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Start upgrading the code or configuration version of a Service Fabric cluster. + + Validate the supplied upgrade parameters and start upgrading the code or configuration version + of a Service Fabric cluster if the parameters are valid. + + :param start_cluster_upgrade_description: Describes the parameters for starting a cluster + upgrade. + :type start_cluster_upgrade_description: ~azure.servicefabric.models.StartClusterUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_cluster_upgrade.metadata = 
{'url': '/$/Upgrade'} # type: ignore + + async def start_cluster_configuration_upgrade( + self, + cluster_configuration_upgrade_description: "_models.ClusterConfigurationUpgradeDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Start upgrading the configuration of a Service Fabric standalone cluster. + + Validate the supplied configuration upgrade parameters and start upgrading the cluster + configuration if the parameters are valid. + + :param cluster_configuration_upgrade_description: Parameters for a standalone cluster + configuration upgrade. + :type cluster_configuration_upgrade_description: ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_cluster_configuration_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'} # type: ignore + + async def update_cluster_upgrade( + self, + update_cluster_upgrade_description: "_models.UpdateClusterUpgradeDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Update the upgrade parameters of a Service Fabric cluster upgrade. + + Update the upgrade parameters used during a Service Fabric cluster upgrade. + + :param update_cluster_upgrade_description: Parameters for updating a cluster upgrade. + :type update_cluster_upgrade_description: ~azure.servicefabric.models.UpdateClusterUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_cluster_upgrade.metadata = 
{'url': '/$/UpdateUpgrade'} # type: ignore + + async def get_aad_metadata( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.AadMetadataObject": + """Gets the Azure Active Directory metadata used for secured connection to cluster. + + Gets the Azure Active Directory metadata used for secured connection to cluster. + This API is not supposed to be called separately. It provides information needed to set up an + Azure Active Directory secured connection with a Service Fabric cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: AadMetadataObject, or the result of cls(response) + :rtype: ~azure.servicefabric.models.AadMetadataObject + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.AadMetadataObject"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_aad_metadata.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('AadMetadataObject', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'} # type: ignore + + async def get_cluster_version( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ClusterVersion": + """Get the current Service Fabric cluster version. + + If a cluster upgrade is happening, then this API will return the lowest (older) version of the + current and target cluster runtime versions. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterVersion, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterVersion + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterVersion"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_version.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterVersion', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_version.metadata = {'url': '/$/GetClusterVersion'} # type: ignore + + async def get_cluster_load( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ClusterLoadInfo": + """Gets the load of a Service Fabric cluster. 
+ + Retrieves the load information of a Service Fabric cluster for all the metrics that have load + or capacity defined. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterLoadInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterLoadInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterLoadInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_load.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('ClusterLoadInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_load.metadata = {'url': '/$/GetLoadInformation'} # type: ignore + + async def toggle_verbose_service_placement_health_reporting( + self, + enabled: bool, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Changes the verbosity of service placement health reporting. + + If verbosity is set to true, then detailed health reports will be generated when replicas + cannot be placed or dropped. + If verbosity is set to false, then no health reports will be generated when replicas cannot be + placed or dropped. + + :param enabled: The verbosity of service placement health reporting. + :type enabled: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.toggle_verbose_service_placement_health_reporting.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Enabled'] = self._serialize.query("enabled", enabled, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + toggle_verbose_service_placement_health_reporting.metadata = {'url': '/$/ToggleVerboseServicePlacementHealthReporting'} # type: ignore + + async def get_node_info_list( + self, + continuation_token_parameter: Optional[str] = None, + node_status_filter: Optional[Union[str, "_models.NodeStatusFilter"]] = "default", + 
max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedNodeInfoList": + """Gets the list of nodes in the Service Fabric cluster. + + The response includes the name, status, ID, health, uptime, and other details about the nodes. + + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param node_status_filter: Allows filtering the nodes based on the NodeStatus. Only the nodes + that are matching the specified filter value will be returned. The filter value can be one of + the following. + :type node_status_filter: str or ~azure.servicefabric.models.NodeStatusFilter + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedNodeInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedNodeInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedNodeInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if node_status_filter is not None: + query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedNodeInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_info_list.metadata = {'url': '/Nodes'} # type: ignore + + async def get_node_info( + self, + node_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional["_models.NodeInfo"]: + """Gets the information about a specific node in the Service Fabric cluster. + + The response includes the name, status, ID, health, uptime, and other details about the node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NodeInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NodeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_info.metadata = {'url': '/Nodes/{nodeName}'} # type: ignore + + async def get_node_health( + self, + node_name: str, + events_health_state_filter: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.NodeHealth": + """Gets the health of a Service Fabric node. + + Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection + of health events reported on the node based on the health state. If the node that you specify + by name does not exist in the health store, this returns an error. + + :param node_name: The name of the node. + :type node_name: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. 
The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('NodeHealth', pipeline_response) + + if cls: + return 
cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} # type: ignore + + async def get_node_health_using_policy( + self, + node_name: str, + events_health_state_filter: Optional[int] = 0, + timeout: Optional[int] = 60, + cluster_health_policy: Optional["_models.ClusterHealthPolicy"] = None, + **kwargs + ) -> "_models.NodeHealth": + """Gets the health of a Service Fabric node, by using the specified health policy. + + Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection + of health events reported on the node based on the health state. Use ClusterHealthPolicy in the + POST body to override the health policies used to evaluate the health. If the node that you + specify by name does not exist in the health store, this returns an error. + + :param node_name: The name of the node. + :type node_name: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. 
+ * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param cluster_health_policy: Describes the health policies used to evaluate the health of a + cluster or node. If not present, the health evaluation uses the health policy from cluster + manifest or the default health policy. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_node_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is 
not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cluster_health_policy is not None: + body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('NodeHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} # type: ignore + + async def report_node_health( + self, + node_name: str, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric node. + + Reports health state of the specified Service Fabric node. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway node, which forwards to the health store. 
+ The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetNodeHealth and check that the + report appears in the HealthEvents section. + + :param node_name: The name of the node. + :type node_name: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_node_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'} # type: ignore + + async def get_node_load_info( + self, + node_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.NodeLoadInfo": + """Gets the load information of a Service Fabric node. + + Retrieves the load information of a Service Fabric node for all the metrics that have load or + capacity defined. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeLoadInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeLoadInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeLoadInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_load_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('NodeLoadInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} # type: ignore + + async def disable_node( + self, + node_name: str, + timeout: Optional[int] = 60, + deactivation_intent: Optional[Union[str, "_models.DeactivationIntent"]] = None, + **kwargs + ) -> None: + """Deactivate a Service Fabric cluster node with the specified deactivation intent. + + Deactivate a Service Fabric cluster node with the specified deactivation intent. Once the + deactivation is in progress, the deactivation intent can be increased, but not decreased (for + example, a node that is deactivated with the Pause intent can be deactivated further with + Restart, but not the other way around. Nodes may be reactivated using the Activate a node + operation any time after they are deactivated. If the deactivation is not complete, this will + cancel the deactivation. A node that goes down and comes back up while deactivated will still + need to be reactivated before services will be placed on that node. + + :param node_name: The name of the node. 
+ :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param deactivation_intent: Describes the intent or reason for deactivating the node. The + possible values are following. + :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _deactivation_intent_description = _models.DeactivationIntentDescription(deactivation_intent=deactivation_intent) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, 
Any] + body_content = self._serialize.body(_deactivation_intent_description, 'DeactivationIntentDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'} # type: ignore + + async def enable_node( + self, + node_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Activate a Service Fabric cluster node that is currently deactivated. + + Activates a Service Fabric cluster node that is currently deactivated. Once activated, the node + will again become a viable target for placing new replicas, and any deactivated replicas + remaining on the node will be reactivated. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.enable_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_node.metadata = {'url': '/Nodes/{nodeName}/$/Activate'} # type: ignore + + async def remove_node_state( + self, + node_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Notifies Service Fabric that the persisted state on a node has been permanently 
removed or lost. + + This implies that it is not possible to recover the persisted state of that node. This + generally happens if a hard disk has been wiped clean, or if a hard disk crashes. The node has + to be down for this operation to be successful. This operation lets Service Fabric know that + the replicas on that node no longer exist, and that Service Fabric should stop waiting for + those replicas to come back up. Do not run this cmdlet if the state on the node has not been + removed and the node can come back up with its state intact. Starting from Service Fabric 6.5, + in order to use this API for seed nodes, please change the seed nodes to regular (non-seed) + nodes and then invoke this API to remove the node state. If the cluster is running on Azure, + after the seed node goes down, Service Fabric will try to change it to a non-seed node + automatically. To make this happen, make sure the number of non-seed nodes in the primary node + type is no less than the number of Down seed nodes. If necessary, add more nodes to the primary + node type to achieve this. For standalone cluster, if the Down seed node is not expected to + come back up with its state intact, please remove the node from the cluster, see + https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-windows-server-add-remove-nodes. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.remove_node_state.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'} # type: ignore + + async def restart_node( + self, + node_name: str, + timeout: Optional[int] = 60, + node_instance_id: str = "0", + create_fabric_dump: Optional[Union[str, 
"_models.CreateFabricDump"]] = "False", + **kwargs + ) -> None: + """Restarts a Service Fabric cluster node. + + Restarts a Service Fabric cluster node that is already started. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param node_instance_id: The instance ID of the target node. If instance ID is specified the + node is restarted only if it matches with the current instance of the node. A default value of + "0" would match any instance ID. The instance ID can be obtained using get node query. + :type node_instance_id: str + :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is + case-sensitive. + :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _restart_node_description = _models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.restart_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: 
Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_restart_node_description, 'RestartNodeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'} # type: ignore + + async def remove_configuration_overrides( + self, + node_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Removes configuration overrides on the specified node. + + This api allows removing all existing configuration overrides on specified node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.remove_configuration_overrides.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + remove_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/RemoveConfigurationOverrides'} # type: ignore + + async def get_configuration_overrides( + self, + node_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> 
List["_models.ConfigParameterOverride"]: + """Gets the list of configuration overrides on the specified node. + + This api allows getting all existing configuration overrides on the specified node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ConfigParameterOverride, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ConfigParameterOverride] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConfigParameterOverride"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_configuration_overrides.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + 
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('[ConfigParameterOverride]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/GetConfigurationOverrides'}  # type: ignore

    async def add_configuration_parameter_overrides(
        self,
        node_name: str,
        config_parameter_override_list: List["_models.ConfigParameterOverride"],
        force: Optional[bool] = None,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Adds the list of configuration overrides on the specified node.

        This api allows adding configuration overrides on the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param config_parameter_override_list: Description for adding list of configuration overrides.
        :type config_parameter_override_list:
         list[~azure.servicefabric.models.ConfigParameterOverride]
        :param force: Force adding configuration overrides on specified nodes.
        :type force: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add_configuration_parameter_overrides.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if force is not None:
            query_parameters['Force'] = self._serialize.query("force", force, 'bool')
        if timeout is not None:
            # Service accepts timeouts in the range 1..4294967295 seconds.
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(config_parameter_override_list, '[ConfigParameterOverride]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'}  # type: ignore

    async def remove_node_tags(
        self,
        node_name: str,
        node_tags: List[str],
        **kwargs
    ) -> None:
        """Removes the list of tags from the specified node.

        This api allows removing set of tags from the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param node_tags: Description for removing list of node tags.
        :type node_tags: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.remove_node_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(node_tags, '[str]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    remove_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeTags'}  # type: ignore

    async def add_node_tags(
        self,
        node_name: str,
        node_tags: List[str],
        **kwargs
    ) -> None:
        """Adds the list of tags on the specified node.

        This api allows adding tags to the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param node_tags: Description for adding list of node tags.
        :type node_tags: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add_node_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(node_tags, '[str]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    add_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/AddNodeTags'}
    # type: ignore

    async def get_application_type_info_list(
        self,
        application_type_definition_kind_filter: Optional[int] = 0,
        exclude_application_parameters: Optional[bool] = False,
        continuation_token_parameter: Optional[str] = None,
        max_results: Optional[int] = 0,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.PagedApplicationTypeInfoList":
        """Gets the list of application types in the Service Fabric cluster.

        Returns the information about the application types that are provisioned or in the process of
        being provisioned in the Service Fabric cluster. Each version of an application type is
        returned as one application type. The response includes the name, version, status, and other
        details about the application type. This is a paged query, meaning that if not all of the
        application types fit in a page, one page of results is returned as well as a continuation
        token, which can be used to get the next page. For example, if there are 10 application types
        but a page only fits the first three application types, or if max results is set to 3, then
        three is returned. To access the rest of the results, retrieve subsequent pages by using the
        returned continuation token in the next query. An empty continuation token is returned if there
        are no subsequent pages.

        :param application_type_definition_kind_filter: Used to filter on ApplicationTypeDefinitionKind
         which is the mechanism used to define a Service Fabric application type.


         * Default - Default value, which performs the same function as selecting "All". The value is
         0.
         * All - Filter that matches input with any ApplicationTypeDefinitionKind value. The value is
         65535.
         * ServiceFabricApplicationPackage - Filter that matches input with
         ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage. The value is 1.
         * Compose - Filter that matches input with ApplicationTypeDefinitionKind value Compose. The
         value is 2.
        :type application_type_definition_kind_filter: int
        :param exclude_application_parameters: The flag that specifies whether application parameters
         will be excluded from the result.
        :type exclude_application_parameters: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedApplicationTypeInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedApplicationTypeInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_type_info_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if application_type_definition_kind_filter is not None:
            query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int')
        if exclude_application_parameters is not None:
            query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
        if continuation_token_parameter is not None:
            # skip_quote: the continuation token must be passed through without URL encoding.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_type_info_list.metadata = {'url': '/ApplicationTypes'}  # type: ignore

    async def get_application_type_info_list_by_name(
        self,
        application_type_name: str,
        application_type_version: Optional[str] = None,
        exclude_application_parameters: Optional[bool] = False,
        continuation_token_parameter: Optional[str] = None,
        max_results: Optional[int] = 0,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.PagedApplicationTypeInfoList":
        """Gets the list of application types in the Service Fabric cluster matching exactly the specified name.

        Returns the information about the application types that are provisioned or in the process of
        being provisioned in the Service Fabric cluster. These results are of application types whose
        name match exactly the one specified as the parameter, and which comply with the given query
        parameters. All versions of the application type matching the application type name are
        returned, with each version returned as one application type. The response includes the name,
        version, status, and other details about the application type. This is a paged query, meaning
        that if not all of the application types fit in a page, one page of results is returned as well
        as a continuation token, which can be used to get the next page. For example, if there are 10
        application types but a page only fits the first three application types, or if max results is
        set to 3, then three is returned. To access the rest of the results, retrieve subsequent pages
        by using the returned continuation token in the next query. An empty continuation token is
        returned if there are no subsequent pages.

        :param application_type_name: The name of the application type.
        :type application_type_name: str
        :param application_type_version: The version of the application type.
        :type application_type_version: str
        :param exclude_application_parameters: The flag that specifies whether application parameters
         will be excluded from the result.
        :type exclude_application_parameters: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedApplicationTypeInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedApplicationTypeInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_type_info_list_by_name.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if application_type_version is not None:
            query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
        if exclude_application_parameters is not None:
            query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
        if continuation_token_parameter is not None:
            # skip_quote: the continuation token must be passed through without URL encoding.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'}  # type: ignore

    async def provision_application_type(
        self,
        provision_application_type_description_base_required_body_param: "_models.ProvisionApplicationTypeDescriptionBase",
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Provisions or registers a Service Fabric application type with the cluster using the '.sfpkg' package in the external store or using the application package in the image store.

        Provisions a Service Fabric application type with the cluster. The provision is required before
        any new applications can be instantiated.
        The provision operation can be performed either on the application package specified by the
        relativePathInImageStore, or by using the URI of the external '.sfpkg'.

        :param provision_application_type_description_base_required_body_param: The base type of
         provision application type description which supports either image store-based provision or
         external store-based provision.
        :type provision_application_type_description_base_required_body_param: ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.provision_application_type.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The service may answer 200 or 202 for this operation.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'}  # type: ignore

    async def unprovision_application_type(
        self,
        application_type_name: str,
        application_type_version: str,
        timeout: Optional[int] = 60,
        async_parameter: Optional[bool] = None,
        **kwargs
    ) -> None:
        """Removes or unregisters a Service Fabric application type from the cluster.

        This operation can only be performed if all application instances of the application type have
        been deleted. Once the application type is unregistered, no new application instances can be
        created for this particular application type.

        :param application_type_name: The name of the application type.
        :type application_type_name: str
        :param application_type_version: The version of the application type as defined in the
         application manifest.
        :type application_type_version: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param async_parameter: The flag indicating whether or not unprovision should occur
         asynchronously. When set to true, the unprovision operation returns when the request is
         accepted by the system, and the unprovision operation continues without any timeout limit. The
         default value is false. However, we recommend setting it to true for large application packages
         that were provisioned.
        :type async_parameter: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # The flattened parameters are recomposed into the body model expected by the service.
        _unprovision_application_type_description_info = _models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.unprovision_application_type.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The service may answer 200 or 202 for this operation.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'}  # type: ignore

    async def get_service_type_info_list(
        self,
        application_type_name: str,
        application_type_version: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> List["_models.ServiceTypeInfo"]:
        """Gets the list containing the information about service types that are supported by a provisioned application type in a Service Fabric cluster.

        Gets the list containing the information about service types that are supported by a
        provisioned application type in a Service Fabric cluster. The provided application type must
        exist. Otherwise, a 404 status is returned.

        :param application_type_name: The name of the application type.
        :type application_type_name: str
        :param application_type_version: The version of the application type.
        :type application_type_version: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ServiceTypeInfo, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.ServiceTypeInfo]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ServiceTypeInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_service_type_info_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('[ServiceTypeInfo]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'}  # type: ignore

    async def get_service_type_info_by_name(
        self,
        application_type_name: str,
        application_type_version: str,
        service_type_name: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> Optional["_models.ServiceTypeInfo"]:
        """Gets the information about a specific service type that is supported by a provisioned application type in a Service Fabric cluster.

        Gets the information about a specific service type that is supported by a provisioned
        application type in a Service Fabric cluster. The provided application type must exist.
        Otherwise, a 404 status is returned. A 204 response is returned if the specified service type
        is not found in the cluster.

        :param application_type_name: The name of the application type.
        :type application_type_name: str
        :param application_type_version: The version of the application type.
        :type application_type_version: str
        :param service_type_name: Specifies the name of a Service Fabric service type.
        :type service_type_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceTypeInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ServiceTypeInfo or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ServiceTypeInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_service_type_info_by_name.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
            'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 (service type not found) is a success status here and maps to a None result.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise
HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceTypeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore + + async def get_service_manifest( + self, + application_type_name: str, + application_type_version: str, + service_manifest_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ServiceTypeManifest": + """Gets the manifest describing a service type. + + Gets the manifest describing a service type. The response contains the service manifest XML as + a string. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceTypeManifest, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceTypeManifest + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceTypeManifest"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_manifest.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceTypeManifest', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'} # type: ignore + + async def get_deployed_service_type_info_list( + self, + node_name: str, + application_id: str, + service_manifest_name: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.DeployedServiceTypeInfo"]: + """Gets the list containing the information about service types from the applications deployed on a node in a Service Fabric cluster. + + Gets the list containing the information about service types from the applications deployed on + a node in a Service Fabric cluster. The response includes the name of the service type, its + registration status, the code package that registered it and activation ID of the service + package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_manifest_name: The name of the service manifest to filter the list of deployed + service type information. If specified, the response will only contain the information about + service types that are defined in this service manifest. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServiceTypeInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServiceTypeInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_type_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'} # type: ignore + + async def get_deployed_service_type_info_by_name( + self, + node_name: str, + application_id: str, + service_type_name: str, + service_manifest_name: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional[List["_models.DeployedServiceTypeInfo"]]: + """Gets the information about a specified service type of the application deployed on a node in a Service Fabric cluster. + + Gets the list containing the information about a specific service type from the applications + deployed on a node in a Service Fabric cluster. The response includes the name of the service + type, its registration status, the code package that registered it and activation ID of the + service package. Each entry represents one activation of a service type, differentiated by the + activation ID. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_type_name: Specifies the name of a Service Fabric service type. + :type service_type_name: str + :param service_manifest_name: The name of the service manifest to filter the list of deployed + service type information. 
If specified, the response will only contain the information about + service types that are defined in this service manifest. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServiceTypeInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceTypeInfo"]]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_type_info_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore + + async def create_application( + self, + application_description: "_models.ApplicationDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Creates a Service Fabric application. + + Creates a Service Fabric application using the specified description. + + :param application_description: Description for creating an application. + :type application_description: ~azure.servicefabric.models.ApplicationDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_application.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(application_description, 'ApplicationDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_application.metadata = {'url': 
'/Applications/$/Create'} # type: ignore + + async def delete_application( + self, + application_id: str, + force_remove: Optional[bool] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes an existing Service Fabric application. + + An application must be created before it can be deleted. Deleting an application will delete + all services that are part of that application. By default, Service Fabric will try to close + service replicas in a graceful manner and then delete the service. However, if a service is + having issues closing the replica gracefully, the delete operation may take a long time or get + stuck. Use the optional ForceRemove flag to skip the graceful close sequence and forcefully + delete the application and all of its services. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param force_remove: Remove a Service Fabric application or service forcefully without going + through the graceful shutdown sequence. This parameter can be used to forcefully delete an + application or service for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_application.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'} # type: ignore + + async def 
get_application_load_info( + self, + application_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional["_models.ApplicationLoadInfo"]: + """Gets load information about a Service Fabric application. + + Returns the load information about the application that was created or in the process of being + created in the Service Fabric cluster and whose name matches the one specified as the + parameter. The response includes the name, minimum nodes, maximum nodes, the number of nodes + the application is occupying currently, and application load metric information about the + application. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationLoadInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationLoadInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationLoadInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_load_info.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationLoadInfo', pipeline_response) + + if cls: + return cls(pipeline_response, 
deserialized, {}) + + return deserialized + get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'} # type: ignore + + async def get_application_info_list( + self, + application_definition_kind_filter: Optional[int] = 0, + application_type_name: Optional[str] = None, + exclude_application_parameters: Optional[bool] = False, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedApplicationInfoList": + """Gets the list of applications created in the Service Fabric cluster that match the specified filters. + + Gets the information about the applications that were created or in the process of being + created in the Service Fabric cluster and match the specified filters. The response includes + the name, type, status, parameters, and other details about the application. If the + applications do not fit in a page, one page of results is returned as well as a continuation + token, which can be used to get the next page. Filters ApplicationTypeName and + ApplicationDefinitionKindFilter cannot be specified at the same time. + + :param application_definition_kind_filter: Used to filter on ApplicationDefinitionKind, which + is the mechanism used to define a Service Fabric application. + + + * Default - Default value, which performs the same function as selecting "All". The value is + 0. + * All - Filter that matches input with any ApplicationDefinitionKind value. The value is + 65535. + * ServiceFabricApplicationDescription - Filter that matches input with + ApplicationDefinitionKind value ServiceFabricApplicationDescription. The value is 1. + * Compose - Filter that matches input with ApplicationDefinitionKind value Compose. The value + is 2. + :type application_definition_kind_filter: int + :param application_type_name: The application type name used to filter the applications to + query for. 
This value should not contain the application type version. + :type application_type_name: str + :param exclude_application_parameters: The flag that specifies whether application parameters + will be excluded from the result. + :type exclude_application_parameters: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedApplicationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedApplicationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if application_definition_kind_filter is not None: + query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int') + if application_type_name is not None: + query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedApplicationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_info_list.metadata = {'url': '/Applications'} # type: ignore + + async def get_application_info( + self, + application_id: str, + exclude_application_parameters: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional["_models.ApplicationInfo"]: + """Gets information about a Service Fabric application. + + Returns the information about the application that was created or in the process of being + created in the Service Fabric cluster and whose name matches the one specified as the + parameter. The response includes the name, type, status, parameters, and other details about + the application. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param exclude_application_parameters: The flag that specifies whether application parameters + will be excluded from the result. 
+ :type exclude_application_parameters: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_info.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_info.metadata = {'url': '/Applications/{applicationId}'} # type: ignore + + async def get_application_health( + self, + application_id: str, + events_health_state_filter: Optional[int] = 0, + deployed_applications_health_state_filter: Optional[int] = 0, + services_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ApplicationHealth": + """Gets the health of the service fabric application. + + Returns the heath state of the service fabric application. The response reports either Ok, + Error or Warning health state. If the entity is not found in the health store, it will return + Error. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. 
+ If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param deployed_applications_health_state_filter: Allows filtering of the deployed applications + health state objects returned in the result of application health query based on their health + state. + The possible values for this parameter include integer value of one of the following health + states. Only deployed applications that match the filter will be returned. + All deployed applications are used to evaluate the aggregated health state. If not specified, + all entries are returned. + The state values are flag-based enumeration, so the value could be a combination of these + values, obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of deployed applications with + HealthState value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. 
The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_applications_health_state_filter: int + :param services_health_state_filter: Allows filtering of the services health state objects + returned in the result of services health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only services that match the filter are returned. All services are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of services with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type services_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_applications_health_state_filter is not None: + query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') + if services_health_state_filter is not None: + query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not 
None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore + + async def get_application_health_using_policy( + self, + application_id: str, + events_health_state_filter: Optional[int] = 0, + deployed_applications_health_state_filter: Optional[int] = 0, + services_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, + **kwargs + ) -> "_models.ApplicationHealth": + """Gets the health of a Service Fabric application using the specified policy. + + Gets the health of a Service Fabric application. Use EventsHealthStateFilter to filter the + collection of health events reported on the node based on the health state. Use + ClusterHealthPolicies to override the health policies used to evaluate the health. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param deployed_applications_health_state_filter: Allows filtering of the deployed applications + health state objects returned in the result of application health query based on their health + state. + The possible values for this parameter include integer value of one of the following health + states. Only deployed applications that match the filter will be returned. + All deployed applications are used to evaluate the aggregated health state. 
If not specified, + all entries are returned. + The state values are flag-based enumeration, so the value could be a combination of these + values, obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of deployed applications with + HealthState value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_applications_health_state_filter: int + :param services_health_state_filter: Allows filtering of the services health state objects + returned in the result of services health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only services that match the filter are returned. All services are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of services with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
+ * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type services_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. 
+ :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_application_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_applications_health_state_filter is not None: + query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') + if services_health_state_filter is not None: + query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore + + async def report_application_health( + self, + application_id: str, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric application. + + Reports health state of the specified Service Fabric application. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway Application, which forwards to the health store. 
+ The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, get application health and check + that the report appears in the HealthEvents section. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. 
+ This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') 
+ + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} # type: ignore + + async def start_application_upgrade( + self, + application_id: str, + application_upgrade_description: "_models.ApplicationUpgradeDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Starts upgrading an application in the Service Fabric cluster. + + Validates the supplied application upgrade parameters and starts upgrading the application if + the parameters are valid. + Note, `ApplicationParameter + `_\ + s are not preserved across an application upgrade. + In order to preserve current application parameters, the user should get the parameters using + `GetApplicationInfo <./GetApplicationInfo.md>`_ operation first and pass them into the upgrade + API call as shown in the example. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
+ :type application_id: str + :param application_upgrade_description: Parameters for an application upgrade. + :type application_upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(application_upgrade_description, 
'ApplicationUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} # type: ignore + + async def get_application_upgrade( + self, + application_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ApplicationUpgradeProgressInfo": + """Gets details for the latest upgrade performed on this application. + + Returns information about the state of the latest application upgrade along with details to aid + debugging application health issues. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationUpgradeProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationUpgradeProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationUpgradeProgressInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} # type: ignore + + async def update_application_upgrade( + self, + application_id: str, + application_upgrade_update_description: "_models.ApplicationUpgradeUpdateDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Updates an ongoing application upgrade in the Service Fabric cluster. + + Updates the parameters of an ongoing application upgrade from the ones specified at the time of + starting the application upgrade. This may be required to mitigate stuck application upgrades + due to incorrect parameters or issues in the application to make progress. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param application_upgrade_update_description: Parameters for updating an existing application + upgrade. + :type application_upgrade_update_description: ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} # type: ignore + + async def resume_application_upgrade( + self, + application_id: str, + upgrade_domain_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Resumes upgrading an application in the Service Fabric cluster. + + Resumes an unmonitored manual Service Fabric application upgrade. Service Fabric upgrades one + upgrade domain at a time. For unmonitored manual upgrades, after Service Fabric finishes an + upgrade domain, it waits for you to call this API before proceeding to the next upgrade domain. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param upgrade_domain_name: The name of the upgrade domain in which to resume the upgrade. + :type upgrade_domain_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _resume_application_upgrade_description = _models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.resume_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} # type: ignore + + async def rollback_application_upgrade( + self, + application_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Starts rolling back the currently on-going upgrade of an application in the Service Fabric cluster. + + Starts rolling back the current application upgrade to the previous version. This API can only + be used to roll back the current in-progress upgrade that is rolling forward to new version. If + the application is not currently being upgraded use StartApplicationUpgrade API to upgrade it + to desired version, including rolling back to a previous version. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.rollback_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} # type: ignore + + async def get_deployed_application_info_list( + self, + node_name: str, + timeout: Optional[int] = 60, + 
include_health_state: Optional[bool] = False, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + **kwargs + ) -> "_models.PagedDeployedApplicationInfoList": + """Gets the list of applications deployed on a Service Fabric node. + + Gets the list of applications deployed on a Service Fabric node. The results do not include + information about deployed system applications unless explicitly queried for by ID. Results + encompass deployed applications in active, activating, and downloading states. This query + requires that the node name corresponds to a node on the cluster. The query fails if the + provided node name does not point to any active Service Fabric nodes on the cluster. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param include_health_state: Include the health state of an entity. + If this parameter is false or not specified, then the health state returned is "Unknown". + When set to true, the query goes in parallel to the node and the health system service before + the results are merged. + As a result, the query is more expensive and may take a longer time. + :type include_health_state: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedDeployedApplicationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedDeployedApplicationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if include_health_state is not None: + query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedDeployedApplicationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} # type: ignore + + async def get_deployed_application_info( + self, + node_name: str, + application_id: str, + timeout: Optional[int] = 60, + include_health_state: Optional[bool] = False, + **kwargs + ) -> Optional["_models.DeployedApplicationInfo"]: + """Gets the information about an application deployed on a Service Fabric node. + + This query returns system application information if the application ID provided is for system + application. Results encompass deployed applications in active, activating, and downloading + states. This query requires that the node name corresponds to a node on the cluster. The query + fails if the provided node name does not point to any active Service Fabric nodes on the + cluster. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. 
This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param include_health_state: Include the health state of an entity. + If this parameter is false or not specified, then the health state returned is "Unknown". + When set to true, the query goes in parallel to the node and the health system service before + the results are merged. + As a result, the query is more expensive and may take a longer time. + :type include_health_state: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedApplicationInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedApplicationInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DeployedApplicationInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + 
query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if include_health_state is not None: + query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DeployedApplicationInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} # type: ignore + + async def get_deployed_application_health( + self, + node_name: str, + application_id: str, + events_health_state_filter: Optional[int] = 0, + deployed_service_packages_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.DeployedApplicationHealth": + """Gets the information about health of an application deployed on a Service Fabric node. + + Gets the information about health of an application deployed on a Service Fabric node. 
Use + EventsHealthStateFilter to optionally filter for the collection of HealthEvent objects reported + on the deployed application based on health state. Use DeployedServicePackagesHealthStateFilter + to optionally filter for DeployedServicePackageHealth children based on health state. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. 
+ :type events_health_state_filter: int + :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service + package health state objects returned in the result of deployed application health query based + on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only deployed service packages that match the filter are returned. All deployed service + packages are used to evaluate the aggregated health state of the deployed application. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value can be a combination of these + values, obtained using the bitwise 'OR' operator. + For example, if the provided value is 6 then health state of service packages with HealthState + value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_service_packages_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_service_packages_health_state_filter is not None: + query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore + + async def get_deployed_application_health_using_policy( + self, + node_name: str, + application_id: str, + events_health_state_filter: Optional[int] = 0, + deployed_service_packages_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, + **kwargs + ) -> "_models.DeployedApplicationHealth": + """Gets the information about health of an application deployed on a Service Fabric node. using the specified policy. + + Gets the information about health of an application deployed on a Service Fabric node using the + specified policy. Use EventsHealthStateFilter to optionally filter for the collection of + HealthEvent objects reported on the deployed application based on health state. Use + DeployedServicePackagesHealthStateFilter to optionally filter for DeployedServicePackageHealth + children based on health state. Use ApplicationHealthPolicy to optionally override the health + policies used to evaluate the health. 
This API only uses 'ConsiderWarningAsError' field of the + ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the + deployed application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. 
+ :type events_health_state_filter: int + :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service + package health state objects returned in the result of deployed application health query based + on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only deployed service packages that match the filter are returned. All deployed service + packages are used to evaluate the aggregated health state of the deployed application. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value can be a combination of these + values, obtained using the bitwise 'OR' operator. + For example, if the provided value is 6 then health state of service packages with HealthState + value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_service_packages_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_service_packages_health_state_filter is not None: + query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", 
deployed_service_packages_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore + + async def report_deployed_application_health( + self, + node_name: str, + application_id: str, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric 
application deployed on a Service Fabric node. + + Reports health state of the application deployed on a Service Fabric node. The report must + contain the information about the source of the health report and property on which it is + reported. + The report is sent to a Service Fabric gateway Service, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, get deployed application health and + check that the report appears in the HealthEvents section. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. 
+ Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_deployed_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = 
self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} # type: ignore + + async def get_application_manifest( + self, + application_type_name: str, + application_type_version: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ApplicationTypeManifest": + """Gets the manifest describing an application type. + + The response contains the application manifest XML as a string. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationTypeManifest, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationTypeManifest + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationTypeManifest"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_manifest.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationTypeManifest', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} # type: ignore + + async def get_service_info_list( + self, + application_id: str, + service_type_name: Optional[str] = None, + continuation_token_parameter: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedServiceInfoList": + """Gets the information about all services belonging to the application specified by the application ID. + + Returns the information about all services belonging to the application specified by the + application ID. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_type_name: The service type name used to filter the services to query for. + :type service_type_name: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServiceInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServiceInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if service_type_name is not None: + query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, 
query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedServiceInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} # type: ignore + + async def get_service_info( + self, + application_id: str, + service_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional["_models.ServiceInfo"]: + """Gets the information about the specific service belonging to the Service Fabric application. + + Returns the information about the specified service belonging to the specified Service Fabric + application. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_info.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_info.metadata = {'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} # type: ignore + + async def get_application_name_info( + self, + service_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ApplicationNameInfo": + """Gets the name of the Service Fabric application for a service. + + Gets the name of the application for the specified service. A 404 + FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the provided service ID + does not exist. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationNameInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationNameInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationNameInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_name_info.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationNameInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_name_info.metadata = {'url': 
'/Services/{serviceId}/$/GetApplicationName'} # type: ignore + + async def create_service( + self, + application_id: str, + service_description: "_models.ServiceDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Creates the specified Service Fabric service. + + This API allows creating a new Service Fabric stateless or stateful service under a specified + Service Fabric application. The description for creating the service includes partitioning + information and optional properties for placement and load balancing. Some of the properties + can later be modified using ``UpdateService`` API. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_description: The information necessary to create a service. + :type service_description: ~azure.servicefabric.models.ServiceDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds.
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_service.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(service_description, 'ServiceDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} # type: ignore + + async def create_service_from_template( + self, + application_id: str, + service_from_template_description: "_models.ServiceFromTemplateDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Creates a Service Fabric service from the service template. + + Creates a Service Fabric service from the service template defined in the application manifest. + A service template contains the properties that will be the same for the service instance of the + same type. The API allows overriding the properties that are usually different for different + services of the same service type. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_from_template_description: Describes the service that needs to be created from + the template defined in the application manifest. + :type service_from_template_description: ~azure.servicefabric.models.ServiceFromTemplateDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds.
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_service_from_template.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} # type: ignore + + async def delete_service( + self, + service_id: str, + force_remove: Optional[bool] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes an existing Service Fabric service. + + A service must be created before it can be deleted. By default, Service Fabric will try to + close service replicas in a graceful manner and then delete the service. However, if the + service is having issues closing the replica gracefully, the delete operation may take a long + time or get stuck. Use the optional ForceRemove flag to skip the graceful close sequence and + forcefully delete the service. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param force_remove: Remove a Service Fabric application or service forcefully without going + through the graceful shutdown sequence. This parameter can be used to forcefully delete an + application or service for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_service.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} # type: ignore + + async def update_service( + self, + 
service_id: str, + service_update_description: "_models.ServiceUpdateDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Updates a Service Fabric service using the specified update description. + + This API allows updating properties of a running Service Fabric service. The set of properties + that can be updated are a subset of the properties that were specified at the time of creating + the service. The current set of properties can be obtained using ``GetServiceDescription`` API. + Note that updating the properties of a running service is different than upgrading your + application using ``StartApplicationUpgrade`` API. The upgrade is a long running background + operation that involves moving the application from one version to another, one upgrade domain + at a time, whereas update applies the new properties immediately to the service. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_update_description: The information necessary to update a service. + :type service_update_description: ~azure.servicefabric.models.ServiceUpdateDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_service.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} # type: ignore + + async def get_service_description( + self, + service_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ServiceDescription": + """Gets the description of an existing Service Fabric service. + + Gets the description of an existing Service Fabric service. A service must be created before + its description can be obtained. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
:type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceDescription, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ServiceDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceDescription"]
        # Map auth/not-found/conflict statuses to typed azure-core exceptions;
        # callers may extend or override this via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_service_description.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: hierarchical service IDs use '~' delimiters that must not be URL-encoded
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the service's FabricError payload; attached to the raised exception.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ServiceDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_service_description.metadata = {'url': 
'/Services/{serviceId}/$/GetDescription'} # type: ignore + + async def get_service_health( + self, + service_id: str, + events_health_state_filter: Optional[int] = 0, + partitions_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ServiceHealth": + """Gets the health of the specified Service Fabric service. + + Gets the health information of the specified service. + Use EventsHealthStateFilter to filter the collection of health events reported on the service + based on the health state. + Use PartitionsHealthStateFilter to filter the collection of partitions returned. + If you specify a service that does not exist in the health store, this request returns an + error. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. 
Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param partitions_health_state_filter: Allows filtering of the partitions health state objects + returned in the result of service health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only partitions that match the filter are returned. All partitions are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these value + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of partitions with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type partitions_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. 
+ The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_health.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if partitions_health_state_filter is not None: + query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore + + async def get_service_health_using_policy( + self, + service_id: str, + events_health_state_filter: Optional[int] = 0, + partitions_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, + **kwargs + ) -> "_models.ServiceHealth": + """Gets the health of the specified Service Fabric service, by using the specified health policy. + + Gets the health information of the specified service. + If the application health policy is specified, the health evaluation uses it to get the + aggregated health state. + If the policy is not specified, the health evaluation uses the application health policy + defined in the application manifest, or the default health policy, if no policy is defined in + the manifest. + Use EventsHealthStateFilter to filter the collection of health events reported on the service + based on the health state. 
+ Use PartitionsHealthStateFilter to filter the collection of partitions returned. + If you specify a service that does not exist in the health store, this request returns an + error. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. 
+ :type events_health_state_filter: int + :param partitions_health_state_filter: Allows filtering of the partitions health state objects + returned in the result of service health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only partitions that match the filter are returned. All partitions are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these value + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of partitions with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type partitions_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_service_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if partitions_health_state_filter is not None: + query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore + + async def report_service_health( + self, + service_id: str, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric service. + + Reports health state of the specified Service Fabric service. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway Service, which forwards to the health store. 
The report may be accepted by the gateway, but rejected by the health store after extra
        validation.
        For example, the health store may reject the report because of an invalid parameter, like a
        stale sequence number.
        To see whether the report was applied in the health store, run GetServiceHealth and check that
        the report appears in the HealthEvents section.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param health_information: Describes the health information for the health report. This
         information needs to be present in all of the health reports sent to the health manager.
        :type health_information: ~azure.servicefabric.models.HealthInformation
        :param immediate: A flag that indicates whether the report should be sent immediately.
         A health report is sent to a Service Fabric gateway Application, which forwards to the health
         store.
         If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health
         store, regardless of the fabric client settings that the HTTP Gateway Application is using.
         This is useful for critical reports that should be sent as soon as possible.
         Depending on timing and other conditions, sending the report may still fail, for example if
         the HTTP Gateway is closed or the message doesn't reach the Gateway.
         If Immediate is set to false, the report is sent based on the health client settings from the
         HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval
         configuration.
         This is the recommended setting because it allows the health client to optimize health
         reporting messages to health store as well as health report processing.
         By default, reports are not sent immediately.
        :type immediate: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/not-found/conflict statuses to typed azure-core exceptions;
        # callers may extend or override this via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.report_service_health.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: hierarchical service IDs use '~' delimiters that must not be URL-encoded
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if immediate is not None:
            query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(health_information, 'HealthInformation')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the service's FabricError payload; attached to the raised exception.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'}  # type: ignore

    async def resolve_service(
        self,
        service_id: str,
        partition_key_type: Optional[int] = None,
        partition_key_value: Optional[str] = None,
        previous_rsp_version: Optional[str] = None,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ResolvedServicePartition":
        """Resolve a Service Fabric partition.

        Resolve a Service Fabric service partition to get the endpoints of the service replicas.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param partition_key_type: Key type for the partition. This parameter is required if the
         partition scheme for the service is Int64Range or Named. The possible values are following.


         * None (1) - Indicates that the PartitionKeyValue parameter is not specified. This is valid
         for the partitions with partitioning scheme as Singleton.
This is the default value. The value + is 1. + * Int64Range (2) - Indicates that the PartitionKeyValue parameter is an int64 partition key. + This is valid for the partitions with partitioning scheme as Int64Range. The value is 2. + * Named (3) - Indicates that the PartitionKeyValue parameter is a name of the partition. This + is valid for the partitions with partitioning scheme as Named. The value is 3. + :type partition_key_type: int + :param partition_key_value: Partition key. This is required if the partition scheme for the + service is Int64Range or Named. + This is not the partition ID, but rather, either the integer key value, or the name of the + partition ID. + For example, if your service is using ranged partitions from 0 to 10, then they + PartitionKeyValue would be an + integer in that range. Query service description to see the range or name. + :type partition_key_value: str + :param previous_rsp_version: The value in the Version field of the response that was received + previously. This is required if the user knows that the result that was gotten previously is + stale. + :type previous_rsp_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ResolvedServicePartition, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ResolvedServicePartition + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ResolvedServicePartition"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resolve_service.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_key_type is not None: + query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int') + if partition_key_value is not None: + query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True) + if previous_rsp_version is not None: + query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ResolvedServicePartition', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'}  # type: ignore

    async def get_unplaced_replica_information(
        self,
        service_id: str,
        partition_id: Optional[str] = None,
        only_query_primaries: Optional[bool] = False,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.UnplacedReplicaInformation":
        """Gets the information about unplaced replica of the service.

        Returns the information about the unplaced replicas of the service.
        If PartitionId is specified, then result will contain information only about unplaced replicas
        for that partition.
        If PartitionId is not specified, then result will contain information about unplaced replicas
        for all partitions of that service.
        If OnlyQueryPrimaries is set to true, then result will contain information only about primary
        replicas, and will ignore unplaced secondary replicas.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param only_query_primaries: Indicates that unplaced replica information will be queried only
         for primary replicas.
        :type only_query_primaries: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: UnplacedReplicaInformation, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.UnplacedReplicaInformation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UnplacedReplicaInformation"]
        # Map auth/not-found/conflict statuses to typed azure-core exceptions;
        # callers may extend or override this via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_unplaced_replica_information.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: hierarchical service IDs use '~' delimiters that must not be URL-encoded
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if partition_id is not None:
            query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str')
        if only_query_primaries is not None:
            query_parameters['OnlyQueryPrimaries'] = self._serialize.query("only_query_primaries", only_query_primaries, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort parse of the service's FabricError payload; attached to the raised exception.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('UnplacedReplicaInformation', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_unplaced_replica_information.metadata = {'url': '/Services/{serviceId}/$/GetUnplacedReplicaInformation'}  # type: ignore

    async def get_loaded_partition_info_list(
        self,
        metric_name: str,
        service_name: Optional[str] = None,
        ordering: Optional[Union[str, "_models.Ordering"]] = None,
        max_results: Optional[int] = 0,
        continuation_token_parameter: Optional[str] = None,
        **kwargs
    ) -> "_models.LoadedPartitionInformationResultList":
        """Gets ordered list of partitions.

        Retrieves partitions which are most/least loaded according to specified metric.

        :param metric_name: Name of the metric based on which to get ordered list of partitions.
        :type metric_name: str
        :param service_name: The name of a service.
        :type service_name: str
        :param ordering: Ordering of partitions' load.
        :type ordering: str or ~azure.servicefabric.models.Ordering
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
+ :type max_results: long + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: LoadedPartitionInformationResultList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.LoadedPartitionInformationResultList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadedPartitionInformationResultList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_loaded_partition_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['MetricName'] = self._serialize.query("metric_name", metric_name, 'str') + if service_name is not None: + query_parameters['ServiceName'] = self._serialize.query("service_name", service_name, 'str') + if ordering is not None: + query_parameters['Ordering'] = self._serialize.query("ordering", ordering, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('LoadedPartitionInformationResultList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_loaded_partition_info_list.metadata = {'url': '/$/GetLoadedPartitionInfoList'} # type: ignore + + async def get_partition_info_list( + self, + service_id: str, + continuation_token_parameter: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedServicePartitionInfoList": + """Gets the list of partitions of a Service Fabric service. + + The response includes the partition ID, partitioning scheme information, keys supported by the + partition, status, health, and other details about the partition. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. 
A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServicePartitionInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServicePartitionInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedServicePartitionInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_info_list.metadata = {'url': '/Services/{serviceId}/$/GetPartitions'} # type: ignore + + async def get_partition_info( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional["_models.ServicePartitionInfo"]: + """Gets the information about a Service Fabric partition. + + Gets the information about the specified partition. The response includes the partition ID, + partitioning scheme information, keys supported by the partition, status, health, and other + details about the partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServicePartitionInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServicePartitionInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServicePartitionInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_info.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServicePartitionInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
{}) + + return deserialized + get_partition_info.metadata = {'url': '/Partitions/{partitionId}'} # type: ignore + + async def get_service_name_info( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ServiceNameInfo": + """Gets the name of the Service Fabric service for a partition. + + Gets name of the service for the specified partition. A 404 error is returned if the partition + ID does not exist in the cluster. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceNameInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceNameInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceNameInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_name_info.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, 
Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceNameInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_name_info.metadata = {'url': '/Partitions/{partitionId}/$/GetServiceName'} # type: ignore + + async def get_partition_health( + self, + partition_id: str, + events_health_state_filter: Optional[int] = 0, + replicas_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PartitionHealth": + """Gets the health of the specified Service Fabric partition. + + Use EventsHealthStateFilter to filter the collection of health events reported on the service + based on the health state. + Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the + partition. + If you specify a partition that does not exist in the health store, this request returns an + error. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. 
The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState + objects on the partition. The value can be obtained from members or bitwise operations on + members of HealthStateFilter. Only replicas that match the filter will be returned. All + replicas will be used to evaluate the aggregated health state. If not specified, all entries + will be returned.The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. For example, If the provided + value is 6 then all of the events with HealthState value of OK (2) and Warning (4) will be + returned. The possible values for this parameter include integer value of one of the following + health states. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
+ * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type replicas_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", 
events_health_state_filter, 'int') + if replicas_health_state_filter is not None: + query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} # type: ignore + + async def get_partition_health_using_policy( + self, + partition_id: str, + events_health_state_filter: Optional[int] = 0, + replicas_health_state_filter: Optional[int] = 0, + exclude_health_statistics: Optional[bool] = False, + timeout: Optional[int] = 60, + application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, + **kwargs + ) -> "_models.PartitionHealth": + """Gets the health of the specified Service Fabric partition, by using the specified health policy. + + Gets the health information of the specified partition. 
+ If the application health policy is specified, the health evaluation uses it to get the + aggregated health state. + If the policy is not specified, the health evaluation uses the application health policy + defined in the application manifest, or the default health policy, if no policy is defined in + the manifest. + Use EventsHealthStateFilter to filter the collection of health events reported on the partition + based on the health state. + Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the + partition. Use ApplicationHealthPolicy in the POST body to override the health policies used to + evaluate the health. + If you specify a partition that does not exist in the health store, this request returns an + error. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. 
+ * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState + objects on the partition. The value can be obtained from members or bitwise operations on + members of HealthStateFilter. Only replicas that match the filter will be returned. All + replicas will be used to evaluate the aggregated health state. If not specified, all entries + will be returned.The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. For example, If the provided + value is 6 then all of the events with HealthState value of OK (2) and Warning (4) will be + returned. The possible values for this parameter include integer value of one of the following + health states. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type replicas_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_partition_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if replicas_health_state_filter is not None: + query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", 
exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} # type: ignore + + async def report_partition_health( + self, + partition_id: str, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric partition. + + Reports health state of the specified Service Fabric partition. The report must contain the + information about the source of the health report and property on which it is reported. 
+ The report is sent to a Service Fabric gateway Partition, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetPartitionHealth and check + that the report appears in the HealthEvents section. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_partition_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'} # type: ignore + + async def get_partition_load_information( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PartitionLoadInformation": + """Gets the load information of the specified Service Fabric partition. + + Returns information about the load of a specified partition. + The response includes a list of load reports for a Service Fabric partition. + Each report includes the load metric name, value, and last reported time in UTC. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionLoadInformation, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionLoadInformation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionLoadInformation"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_load_information.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionLoadInformation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} # type: ignore + + async def reset_partition_load( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Resets the current load of a Service Fabric partition. + + Resets the current load of a Service Fabric partition to the default load for the service. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.reset_partition_load.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, 
query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + reset_partition_load.metadata = {'url': '/Partitions/{partitionId}/$/ResetLoad'} # type: ignore + + async def recover_partition( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Indicates to the Service Fabric cluster that it should attempt to recover a specific partition that is currently stuck in quorum loss. + + This operation should only be performed if it is known that the replicas that are down cannot + be recovered. Incorrect use of this API can cause potential data loss. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_partition.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'} # type: ignore + + async def recover_service_partitions( + self, + service_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Indicates to the Service 
Fabric cluster that it should attempt to recover the specified service that is currently stuck in quorum loss. + + Indicates to the Service Fabric cluster that it should attempt to recover the specified service + that is currently stuck in quorum loss. This operation should only be performed if it is known + that the replicas that are down cannot be recovered. Incorrect use of this API can cause + potential data loss. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_service_partitions.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} # type: ignore + + async def recover_system_partitions( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Indicates to the 
Service Fabric cluster that it should attempt to recover the system services that are currently stuck in quorum loss. + + Indicates to the Service Fabric cluster that it should attempt to recover the system services + that are currently stuck in quorum loss. This operation should only be performed if it is known + that the replicas that are down cannot be recovered. Incorrect use of this API can cause + potential data loss. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_system_partitions.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} # type: ignore + + async def recover_all_partitions( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Indicates to the Service Fabric cluster that it should attempt to recover any services (including system services) which are currently stuck in quorum loss. + + This operation should only be performed if it is known that the replicas that are down cannot + be recovered. Incorrect use of this API can cause potential data loss. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_all_partitions.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} # type: ignore + + async def move_primary_replica( + self, + partition_id: str, + node_name: Optional[str] = None, + ignore_constraints: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Moves the primary replica of a partition of a stateful service. 
+ + This command moves the primary replica of a partition of a stateful service, respecting all + constraints. + If NodeName parameter is specified, primary will be moved to the specified node (if constraints + allow it). + If NodeName parameter is not specified, primary replica will be moved to a random node in the + cluster. + If IgnoreConstraints parameter is specified and set to true, then primary will be moved + regardless of the constraints. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param node_name: The name of the node. + :type node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or instance. If this + parameter is not specified, all constraints are honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.move_primary_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if node_name is not None: + query_parameters['NodeName'] = self._serialize.query("node_name", node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return 
cls(pipeline_response, None, {}) + + move_primary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MovePrimaryReplica'} # type: ignore + + async def move_secondary_replica( + self, + partition_id: str, + current_node_name: str, + new_node_name: Optional[str] = None, + ignore_constraints: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Moves the secondary replica of a partition of a stateful service. + + This command moves the secondary replica of a partition of a stateful service, respecting all + constraints. + CurrentNodeName parameter must be specified to identify the replica that is moved. + Source node name must be specified, but new node name can be omitted, and in that case replica + is moved to a random node. + If IgnoreConstraints parameter is specified and set to true, then secondary will be moved + regardless of the constraints. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param current_node_name: The name of the source node for secondary replica move. + :type current_node_name: str + :param new_node_name: The name of the target node for secondary replica or instance move. If + not specified, replica or instance is moved to a random node. + :type new_node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or instance. If this + parameter is not specified, all constraints are honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.move_secondary_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') + if new_node_name is not None: + query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + move_secondary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MoveSecondaryReplica'} # type: ignore + + async def update_partition_load( + self, + partition_metric_load_description_list: List["_models.PartitionMetricLoadDescription"], + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedUpdatePartitionLoadResultList": + """Update the loads of provided partitions for specific metrics. + + Updates the load value and predicted load value for all the partitions provided for specified + metrics. + + :param partition_metric_load_description_list: Description of updating load for list of + partitions. + :type partition_metric_load_description_list: list[~azure.servicefabric.models.PartitionMetricLoadDescription] + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. 
If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedUpdatePartitionLoadResultList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedUpdatePartitionLoadResultList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedUpdatePartitionLoadResultList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_partition_load.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(partition_metric_load_description_list, '[PartitionMetricLoadDescription]') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedUpdatePartitionLoadResultList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update_partition_load.metadata = {'url': '/$/UpdatePartitionLoad'} # type: ignore + + async def move_instance( + self, + service_id: str, + partition_id: str, + current_node_name: Optional[str] = None, + new_node_name: Optional[str] = None, + ignore_constraints: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Moves the instance of a partition of a stateless service. + + This command moves the instance of a partition of a stateless service, respecting all + constraints. + Partition id and service name must be specified to be able to move the instance. + CurrentNodeName when specified identifies the instance that is moved. If not specified, random + instance will be moved + New node name can be omitted, and in that case instance is moved to a random node. + If IgnoreConstraints parameter is specified and set to true, then instance will be moved + regardless of the constraints. + + :param service_id: The identity of the service. 
This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param current_node_name: The name of the source node for instance move. If not specified, + instance is moved from a random node. + :type current_node_name: str + :param new_node_name: The name of the target node for secondary replica or instance move. If + not specified, replica or instance is moved to a random node. + :type new_node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or instance. If this + parameter is not specified, all constraints are honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.move_instance.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if current_node_name is not None: + query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') + if new_node_name is not None: + query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + move_instance.metadata = {'url': '/Services/{serviceId}/$/GetPartitions/{partitionId}/$/MoveInstance'} # type: ignore + + async def create_repair_task( + self, + repair_task: "_models.RepairTask", + **kwargs + ) -> "_models.RepairTaskUpdateInfo": + """Creates a new repair task. + + For clusters that have the Repair Manager Service configured, + this API provides a way to create repair tasks that run automatically or manually. + For repair tasks that run automatically, an appropriate repair executor + must be running for each repair action to run automatically. + These are currently only available in specially-configured Azure Cloud Services. + + To create a manual repair task, provide the set of impacted node names and the + expected impact. When the state of the created repair task changes to approved, + you can safely perform repair actions on those nodes. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param repair_task: Describes the repair task to be created or updated. 
+ :type repair_task: ~azure.servicefabric.models.RepairTask + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RepairTaskUpdateInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_repair_task.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(repair_task, 'RepairTask') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return 
deserialized + create_repair_task.metadata = {'url': '/$/CreateRepairTask'} # type: ignore + + async def cancel_repair_task( + self, + repair_task_cancel_description: "_models.RepairTaskCancelDescription", + **kwargs + ) -> "_models.RepairTaskUpdateInfo": + """Requests the cancellation of the given repair task. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param repair_task_cancel_description: Describes the repair task to be cancelled. + :type repair_task_cancel_description: ~azure.servicefabric.models.RepairTaskCancelDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RepairTaskUpdateInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.cancel_repair_task.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, 
**body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'} # type: ignore + + async def delete_repair_task( + self, + task_id: str, + version: Optional[str] = None, + **kwargs + ) -> None: + """Deletes a completed repair task. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param task_id: The ID of the completed repair task to be deleted. + :type task_id: str + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. 
+ :type version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _repair_task_delete_description = _models.RepairTaskDeleteDescription(task_id=task_id, version=version) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.delete_repair_task.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_repair_task_delete_description, 'RepairTaskDeleteDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'} # type: ignore + + 
async def get_repair_task_list( + self, + task_id_filter: Optional[str] = None, + state_filter: Optional[int] = None, + executor_filter: Optional[str] = None, + **kwargs + ) -> List["_models.RepairTask"]: + """Gets a list of repair tasks matching the given filters. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param task_id_filter: The repair task ID prefix to be matched. + :type task_id_filter: str + :param state_filter: A bitwise-OR of the following values, specifying which task states should + be included in the result list. + + + * 1 - Created + * 2 - Claimed + * 4 - Preparing + * 8 - Approved + * 16 - Executing + * 32 - Restoring + * 64 - Completed. + :type state_filter: int + :param executor_filter: The name of the repair executor whose claimed tasks should be included + in the list. + :type executor_filter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of RepairTask, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.RepairTask] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.RepairTask"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_repair_task_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if task_id_filter is not None: + query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str') + if state_filter is not None: + query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') + if executor_filter is not None: + 
query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[RepairTask]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'} # type: ignore + + async def force_approve_repair_task( + self, + task_id: str, + version: Optional[str] = None, + **kwargs + ) -> "_models.RepairTaskUpdateInfo": + """Forces the approval of the given repair task. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param task_id: The ID of the repair task. + :type task_id: str + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. 
+ :type version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RepairTaskUpdateInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _repair_task_approve_description = _models.RepairTaskApproveDescription(task_id=task_id, version=version) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.force_approve_repair_task.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_repair_task_approve_description, 'RepairTaskApproveDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('RepairTaskUpdateInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'} # type: ignore + + async def update_repair_task_health_policy( + self, + repair_task_update_health_policy_description: "_models.RepairTaskUpdateHealthPolicyDescription", + **kwargs + ) -> "_models.RepairTaskUpdateInfo": + """Updates the health policy of the given repair task. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param repair_task_update_health_policy_description: Describes the repair task health policy + to be updated. + :type repair_task_update_health_policy_description: ~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RepairTaskUpdateInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_repair_task_health_policy.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'} # type: ignore + + async def update_repair_execution_state( + self, + repair_task: "_models.RepairTask", + **kwargs + ) -> "_models.RepairTaskUpdateInfo": + """Updates the execution state of a repair task. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param repair_task: Describes the repair task to be created or updated. 
+ :type repair_task: ~azure.servicefabric.models.RepairTask + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RepairTaskUpdateInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_repair_execution_state.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(repair_task, 'RepairTask') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + 
return deserialized + update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'} # type: ignore + + async def get_replica_info_list( + self, + partition_id: str, + continuation_token_parameter: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedReplicaInfoList": + """Gets the information about replicas of a Service Fabric service partition. + + The GetReplicas endpoint returns information about the replicas of the specified partition. The + response includes the ID, role, status, health, node name, uptime, and other details about the + replica. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedReplicaInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedReplicaInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedReplicaInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_replica_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('PagedReplicaInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'} # type: ignore + + async def get_replica_info( + self, + partition_id: str, + replica_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional["_models.ReplicaInfo"]: + """Gets the information about a replica of a Service Fabric partition. + + The response includes the ID, role, status, health, node name, uptime, and other details about + the replica. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ReplicaInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ReplicaInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicaInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_replica_info.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ReplicaInfo', pipeline_response) + + if cls: 
+ return cls(pipeline_response, deserialized, {}) + + return deserialized + get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'} # type: ignore + + async def get_replica_health( + self, + partition_id: str, + replica_id: str, + events_health_state_filter: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ReplicaHealth": + """Gets the health of a Service Fabric stateful service replica or stateless service instance. + + Gets the health of a Service Fabric replica. + Use EventsHealthStateFilter to filter the collection of health events reported on the replica + based on the health state. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. 
+ * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ReplicaHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ReplicaHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicaHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_replica_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, 
header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ReplicaHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore + + async def get_replica_health_using_policy( + self, + partition_id: str, + replica_id: str, + events_health_state_filter: Optional[int] = 0, + timeout: Optional[int] = 60, + application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, + **kwargs + ) -> "_models.ReplicaHealth": + """Gets the health of a Service Fabric stateful service replica or stateless service instance using the specified policy. + + Gets the health of a Service Fabric stateful service replica or stateless service instance. + Use EventsHealthStateFilter to filter the collection of health events reported on the cluster + based on the health state. + Use ApplicationHealthPolicy to optionally override the health policies used to evaluate the + health. This API only uses 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The + rest of the fields are ignored while evaluating the health of the replica. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. 
+ Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. 
+ :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ReplicaHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ReplicaHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicaHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_replica_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + 
body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ReplicaHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore + + async def report_replica_health( + self, + partition_id: str, + replica_id: str, + health_information: "_models.HealthInformation", + service_kind: Union[str, "_models.ReplicaHealthReportServiceKind"] = "Stateful", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric replica. + + Reports health state of the specified Service Fabric replica. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway Replica, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetReplicaHealth and check that + the report appears in the HealthEvents section. + + :param partition_id: The identity of the partition. 
+ :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param service_kind: The kind of service replica (Stateless or Stateful) for which the health + is being reported. Following are the possible values. + :type service_kind: str or ~azure.servicefabric.models.ReplicaHealthReportServiceKind + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_replica_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} # type: ignore + + async def get_deployed_service_replica_info_list( + self, + node_name: str, + application_id: str, + partition_id: Optional[str] = None, + service_manifest_name: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional[List["_models.DeployedServiceReplicaInfo"]]: + """Gets the list of replicas deployed on a Service Fabric node. + + Gets the list containing the information about replicas deployed on a Service Fabric node. The + information includes partition ID, replica ID, status of the replica, name of the service, name + of the service type, and other information. Use PartitionId or ServiceManifestName query + parameters to return information about the deployed replicas matching the specified values for + those parameters. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param partition_id: The identity of the partition.
+ :type partition_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServiceReplicaInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo] or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceReplicaInfo"]]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_replica_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_id is not None: + query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServiceReplicaInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} # type: ignore + + async def get_deployed_service_replica_detail_info( + self, + node_name: str, + partition_id: str, + replica_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.DeployedServiceReplicaDetailInfo": + """Gets the details of replica deployed on a Service Fabric node. + + Gets the details of the replica deployed on a Service Fabric node. The information includes + service kind, service name, current service operation, current service operation start date + time, partition ID, replica/instance ID, reported load, and other information. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_replica_detail_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} # type: ignore + + async def get_deployed_service_replica_detail_info_by_partition_id( + self, + node_name: str, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.DeployedServiceReplicaDetailInfo": + """Gets the details of replica deployed on a Service Fabric node. + + Gets the details of the replica deployed on a Service Fabric node. The information includes + service kind, service name, current service operation, current service operation start date + time, partition ID, replica/instance ID, reported load, and other information. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', 
pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'} # type: ignore + + async def restart_replica( + self, + node_name: str, + partition_id: str, + replica_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Restarts a service replica of a persisted service running on a node. + + Restarts a service replica of a persisted service running on a node. Warning - There are no + safety checks performed when this API is used. Incorrect use of this API can lead to + availability loss for stateful services. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.restart_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restart_replica.metadata = {'url': 
'/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} # type: ignore + + async def remove_replica( + self, + node_name: str, + partition_id: str, + replica_id: str, + force_remove: Optional[bool] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Removes a service replica running on a node. + + This API simulates a Service Fabric replica failure by removing a replica from a Service Fabric + cluster. The removal closes the replica, transitions the replica to the role None, and then + removes all of the state information of the replica from the cluster. This API tests the + replica state removal path, and simulates the report fault permanent path through client APIs. + Warning - There are no safety checks performed when this API is used. Incorrect use of this API + can lead to data loss for stateful services. In addition, the forceRemove flag impacts all + other replicas hosted in the same process. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param force_remove: Remove a Service Fabric application or service forcefully without going + through the graceful shutdown sequence. This parameter can be used to forcefully delete an + application or service for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.remove_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return 
cls(pipeline_response, None, {}) + + remove_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} # type: ignore + + async def get_deployed_service_package_info_list( + self, + node_name: str, + application_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.DeployedServicePackageInfo"]: + """Gets the list of service packages deployed on a Service Fabric node. + + Returns the information about the service packages deployed on a Service Fabric node for the + given application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServicePackageInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServicePackageInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) + 
+ if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} # type: ignore + + async def get_deployed_service_package_info_list_by_name( + self, + node_name: str, + application_id: str, + service_package_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> Optional[List["_models.DeployedServicePackageInfo"]]: + """Gets the list of service packages deployed on a Service Fabric node matching exactly the specified name. + + Returns the information about the service packages deployed on a Service Fabric node for the + given application. These results are of service packages whose name match exactly the service + package name specified as the parameter. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServicePackageInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServicePackageInfo"]]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_info_list_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + 
raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'} # type: ignore + + async def get_deployed_service_package_health( + self, + node_name: str, + application_id: str, + service_package_name: str, + events_health_state_filter: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.DeployedServicePackageHealth": + """Gets the information about health of a service package for a specific application deployed for a Service Fabric node and application. + + Gets the information about health of a service package for a specific application deployed on a + Service Fabric node. Use EventsHealthStateFilter to optionally filter for the collection of + HealthEvent objects reported on the deployed service package based on health state. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. 
+ Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServicePackageHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore + + async def get_deployed_service_package_health_using_policy( + self, + node_name: str, + application_id: str, + service_package_name: str, + events_health_state_filter: Optional[int] = 0, + timeout: Optional[int] = 60, + application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, + **kwargs + ) -> "_models.DeployedServicePackageHealth": + """Gets the information about health of service package for a specific application deployed on a Service Fabric node using the specified policy. + + Gets the information about health of a service package for a specific application deployed on a + Service Fabric node. using the specified policy. Use EventsHealthStateFilter to optionally + filter for the collection of HealthEvent objects reported on the deployed service package based + on health state. Use ApplicationHealthPolicy to optionally override the health policies used to + evaluate the health. This API only uses 'ConsiderWarningAsError' field of the + ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the + deployed service package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. 
+ For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. 
+ If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServicePackageHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore + + async def report_deployed_service_package_health( + self, + node_name: str, + application_id: str, + service_package_name: str, + health_information: "_models.HealthInformation", + immediate: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Sends a health report on the Service Fabric deployed service package. + + Reports health state of the service package of the application deployed on a Service Fabric + node. The report must contain the information about the source of the health report and + property on which it is reported. + The report is sent to a Service Fabric gateway Service, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. 
+ For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, get deployed service package health + and check that the report appears in the HealthEvents section. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. 
+ This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_deployed_service_package_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: 
Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} # type: ignore + + async def deploy_service_package_to_node( + self, + node_name: str, + deploy_service_package_to_node_description: "_models.DeployServicePackageToNodeDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Downloads all of the code packages associated with specified service manifest on the specified node. + + This API provides a way to download code packages including the container images on a specific + node outside of the normal application deployment and upgrade path. This is useful for the + large code packages and container images to be present on the node before the actual + application deployment and upgrade, thus significantly reducing the total time required for the + deployment or upgrade. + + :param node_name: The name of the node. 
+ :type node_name: str + :param deploy_service_package_to_node_description: Describes information for deploying a + service package to a Service Fabric node. + :type deploy_service_package_to_node_description: ~azure.servicefabric.models.DeployServicePackageToNodeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.deploy_service_package_to_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = 
self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} # type: ignore + + async def get_deployed_code_package_info_list( + self, + node_name: str, + application_id: str, + service_manifest_name: Optional[str] = None, + code_package_name: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.DeployedCodePackageInfo"]: + """Gets the list of code packages deployed on a Service Fabric node. + + Gets the list of code packages deployed on a Service Fabric node for the given application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. 
+ :type service_manifest_name: str + :param code_package_name: The name of code package specified in service manifest registered as + part of an application type in a Service Fabric cluster. + :type code_package_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedCodePackageInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedCodePackageInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_code_package_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if code_package_name is not None: + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[DeployedCodePackageInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_code_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} # type: ignore + + async def restart_deployed_code_package( + self, + node_name: str, + application_id: str, + restart_deployed_code_package_description: "_models.RestartDeployedCodePackageDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Restarts a code package deployed on a Service Fabric node in a cluster. + + Restarts a code package deployed on a Service Fabric node in a cluster. This aborts the code + package process, which will restart all the user service replicas hosted in that process. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
+ :type application_id: str + :param restart_deployed_code_package_description: Describes the deployed code package on + Service Fabric node to restart. + :type restart_deployed_code_package_description: ~azure.servicefabric.models.RestartDeployedCodePackageDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.restart_deployed_code_package.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} # type: ignore + + async def get_container_logs_deployed_on_node( + self, + node_name: str, + application_id: str, + service_manifest_name: str, + code_package_name: str, + tail: Optional[str] = None, + previous: Optional[bool] = False, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ContainerLogs": + """Gets the container logs for container deployed on a Service Fabric node. + + Gets the container logs for container deployed on a Service Fabric node for the given code + package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
+ :type application_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in service manifest registered as + part of an application type in a Service Fabric cluster. + :type code_package_name: str + :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show + the complete logs. + :type tail: str + :param previous: Specifies whether to get container logs from exited/dead containers of the + code package instance. + :type previous: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ContainerLogs, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ContainerLogs + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_container_logs_deployed_on_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + 
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + if tail is not None: + query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') + if previous is not None: + query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ContainerLogs', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} # type: ignore + + async def invoke_container_api( + self, + node_name: str, + application_id: str, + service_manifest_name: str, + code_package_name: str, + code_package_instance_id: str, + container_api_request_body: "_models.ContainerApiRequestBody", + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ContainerApiResponse": + """Invoke container API on a container deployed on a Service Fabric node. 
+ + Invoke container API on a container deployed on a Service Fabric node for the given code + package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in service manifest registered as + part of an application type in a Service Fabric cluster. + :type code_package_name: str + :param code_package_instance_id: ID that uniquely identifies a code package instance deployed + on a Service Fabric node. + :type code_package_instance_id: str + :param container_api_request_body: Parameters for making container API call. + :type container_api_request_body: ~azure.servicefabric.models.ContainerApiRequestBody + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ContainerApiResponse, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ContainerApiResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApiResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.invoke_container_api.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = 
self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ContainerApiResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} # type: ignore + + async def create_compose_deployment( + self, + create_compose_deployment_description: "_models.CreateComposeDeploymentDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Creates a Service Fabric compose deployment. + + Compose is a file format that describes multi-container applications. This API allows deploying + container based applications defined in compose format in a Service Fabric cluster. Once the + deployment is created, its status can be tracked via the ``GetComposeDeploymentStatus`` API. + + :param create_compose_deployment_description: Describes the compose deployment that needs to be + created. + :type create_compose_deployment_description: ~azure.servicefabric.models.CreateComposeDeploymentDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_compose_deployment.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + 
create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} # type: ignore + + async def get_compose_deployment_status( + self, + deployment_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ComposeDeploymentStatusInfo": + """Gets information about a Service Fabric compose deployment. + + Returns the status of the compose deployment that was created or in the process of being + created in the Service Fabric cluster and whose name matches the one specified as the + parameter. The response includes the name, status, and other details about the deployment. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ComposeDeploymentStatusInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentStatusInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_compose_deployment_status.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if 
timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ComposeDeploymentStatusInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_compose_deployment_status.metadata = {'url': '/ComposeDeployments/{deploymentName}'} # type: ignore + + async def get_compose_deployment_status_list( + self, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedComposeDeploymentStatusInfoList": + """Gets the list of compose deployments created in the Service Fabric cluster. + + Gets the status about the compose deployments that were created or in the process of being + created in the Service Fabric cluster. The response includes the name, status, and other + details about the compose deployments. If the list of deployments does not fit in a page, one + page of results is returned as well as a continuation token, which can be used to get the next + page. + + :param continuation_token_parameter: The continuation token parameter is used to obtain the next + set of results. 
A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns the next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedComposeDeploymentStatusInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedComposeDeploymentStatusInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_compose_deployment_status_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} # type: ignore + + async def get_compose_deployment_upgrade_progress( + self, + deployment_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ComposeDeploymentUpgradeProgressInfo": + """Gets details for the latest upgrade performed on this Service Fabric compose deployment. + + Returns the information about the state of the compose deployment upgrade along with details to + aid debugging application health issues. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ComposeDeploymentUpgradeProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentUpgradeProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_compose_deployment_upgrade_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', pipeline_response) + + if cls: + return cls(pipeline_response, 
deserialized, {}) + + return deserialized + get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} # type: ignore + + async def remove_compose_deployment( + self, + deployment_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes an existing Service Fabric compose deployment from cluster. + + Deletes an existing Service Fabric compose deployment. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.remove_compose_deployment.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} # type: ignore + + async def start_compose_deployment_upgrade( + self, + deployment_name: str, + compose_deployment_upgrade_description: "_models.ComposeDeploymentUpgradeDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Starts upgrading a compose deployment in the Service Fabric cluster. + + Validates the supplied upgrade parameters and starts upgrading the deployment if the parameters + are valid. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param compose_deployment_upgrade_description: Parameters for upgrading compose deployment. + :type compose_deployment_upgrade_description: ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_compose_deployment_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} # type: ignore + + async def start_rollback_compose_deployment_upgrade( + self, + deployment_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Starts rolling back a compose deployment upgrade in the Service Fabric cluster. + + Rollback a service fabric compose deployment upgrade. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_rollback_compose_deployment_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_rollback_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/RollbackUpgrade'} # type: ignore + + async def get_chaos( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.Chaos": + """Get the status of Chaos. + + Get the status of Chaos indicating whether or not Chaos is running, the Chaos parameters used + for running Chaos and the status of the Chaos Schedule. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Chaos, or the result of cls(response) + :rtype: ~azure.servicefabric.models.Chaos + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.Chaos"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_chaos.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Chaos', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_chaos.metadata = {'url': '/Tools/Chaos'} # type: ignore + + async def start_chaos( + self, + chaos_parameters: "_models.ChaosParameters", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Starts Chaos in the cluster. 
+ + If Chaos is not already running in the cluster, it starts Chaos with the passed in Chaos + parameters. + If Chaos is already running when this call is made, the call fails with the error code + FABRIC_E_CHAOS_ALREADY_RUNNING. + Refer to the article `Induce controlled Chaos in Service Fabric clusters + <https://docs.microsoft.com/azure/service-fabric/service-fabric-controlled-chaos>`_ for more + details. + + :param chaos_parameters: Describes all the parameters to configure a Chaos run. + :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_chaos.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = 
self._serialize.body(chaos_parameters, 'ChaosParameters') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'} # type: ignore + + async def stop_chaos( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Stops Chaos if it is running in the cluster and put the Chaos Schedule in a stopped state. + + Stops Chaos from executing new faults. In-flight faults will continue to execute until they are + complete. The current Chaos Schedule is put into a stopped state. + Once a schedule is stopped, it will stay in the stopped state and not be used to Chaos Schedule + new runs of Chaos. A new Chaos Schedule must be set in order to resume scheduling. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.stop_chaos.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'} # type: ignore + + async def get_chaos_events( + self, + continuation_token_parameter: Optional[str] = None, + start_time_utc: Optional[str] = None, + end_time_utc: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ChaosEventsSegment": + """Gets the next segment of the Chaos events based on the continuation token or the 
time range. + + To get the next segment of the Chaos events, you can specify the ContinuationToken. To get the + start of a new segment of Chaos events, you can specify the time range + through StartTimeUtc and EndTimeUtc. You cannot specify both the ContinuationToken and the time + range in the same call. + When there are more than 100 Chaos events, the Chaos events are returned in multiple segments + where a segment contains no more than 100 Chaos events and to get the next segment you make a + call to this API with the continuation token. + + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param start_time_utc: The Windows file time representing the start time of the time range for + which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method + `_.aspx) for + details. + :type start_time_utc: str + :param end_time_utc: The Windows file time representing the end time of the time range for + which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method + `_.aspx) for + details. + :type end_time_utc: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. 
If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ChaosEventsSegment, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ChaosEventsSegment + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosEventsSegment"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_chaos_events.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if start_time_utc is not None: + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + if end_time_utc is not None: + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ChaosEventsSegment', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'} # type: ignore + + async def get_chaos_schedule( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ChaosScheduleDescription": + """Get the Chaos Schedule defining when and how to run Chaos. + + Gets the version of the Chaos Schedule in use and the Chaos Schedule that defines when and how + to run Chaos. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ChaosScheduleDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ChaosScheduleDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosScheduleDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_chaos_schedule.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ChaosScheduleDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore + + async def post_chaos_schedule( + self, + timeout: Optional[int] = 60, + version: Optional[int] = None, + schedule: 
Optional["_models.ChaosSchedule"] = None, + **kwargs + ) -> None: + """Set the schedule used by Chaos. + + Chaos will automatically schedule runs based on the Chaos Schedule. + The Chaos Schedule will be updated if the provided version matches the version on the server. + When updating the Chaos Schedule, the version on the server is incremented by 1. + The version on the server will wrap back to 0 after reaching a large number. + If Chaos is running when this call is made, the call will fail. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param version: The version number of the Schedule. + :type version: int + :param schedule: Defines the schedule used by Chaos. + :type schedule: ~azure.servicefabric.models.ChaosSchedule + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _chaos_schedule = _models.ChaosScheduleDescription(version=version, schedule=schedule) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.post_chaos_schedule.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct 
headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_chaos_schedule, 'ChaosScheduleDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore + + async def upload_file( + self, + content_path: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Uploads contents of the file to the image store. + + Uploads contents of the file to the image store. Use this API if the file is small enough to + upload again if the connection fails. The file's data needs to be added to the request body. + The contents will be uploaded to the specified path. Image store service uses a mark file to + indicate the availability of the folder. The mark file is an empty file named "_.dir". The mark + file is generated by the image store service when all files in a folder are uploaded. When + using File-by-File approach to upload application package in REST, the image store service + isn't aware of the file hierarchy of the application package; you need to create a mark file + per folder and upload it last, to let the image store service know that the folder is complete. 
+ + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.upload_file.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + upload_file.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore + + async def get_image_store_content( + self, + content_path: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ImageStoreContent": + """Gets the image store content information. + + Returns the information about the image store content at the specified contentPath. The + contentPath is relative to the root of the image store. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ImageStoreContent, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ImageStoreContent + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_content.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', 
maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ImageStoreContent', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore + + async def delete_image_store_content( + self, + content_path: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes existing image store content. + + Deletes existing image store content being found within the given image store relative path. + This command can be used to delete uploaded application packages once they are provisioned. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_image_store_content.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore + + async def get_image_store_root_content( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ImageStoreContent": + """Gets the content information 
at the root of the image store. + + Returns the information about the image store content at the root of the image store. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ImageStoreContent, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ImageStoreContent + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_root_content.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('ImageStoreContent', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_root_content.metadata = {'url': '/ImageStore'} # type: ignore + + async def copy_image_store_content( + self, + image_store_copy_description: "_models.ImageStoreCopyDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Copies image store content internally. + + Copies the image store content from the source image store relative path to the destination + image store relative path. + + :param image_store_copy_description: Describes the copy description for the image store. + :type image_store_copy_description: ~azure.servicefabric.models.ImageStoreCopyDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.copy_image_store_content.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters 
= {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'} # type: ignore + + async def delete_image_store_upload_session( + self, + session_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Cancels an image store upload session. + + The DELETE request will cause the existing upload session to expire and remove any previously + uploaded file chunks. + + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_image_store_upload_session.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'} # type: ignore + + async def commit_image_store_upload_session( + self, + session_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Commit an image store upload session. 
+ + When all file chunks have been uploaded, the upload session needs to be committed explicitly to + complete the upload. Image store preserves the upload session until the expiration time, which + is 30 minutes after the last chunk received. + + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.commit_image_store_upload_session.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} # type: ignore + + async def get_image_store_upload_session_by_id( + self, + session_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.UploadSession": + """Get the image store upload session by ID. + + Gets the image store upload session identified by the given ID. User can query the upload + session at any time during uploading. + + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UploadSession, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UploadSession + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_upload_session_by_id.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UploadSession', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} # type: ignore + + async def 
get_image_store_upload_session_by_path( + self, + content_path: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.UploadSession": + """Get the image store upload session by relative path. + + Gets the image store upload session associated with the given image store relative path. User + can query the upload session at any time during uploading. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UploadSession, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UploadSession + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_upload_session_by_path.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UploadSession', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} # type: ignore + + async def upload_file_chunk( + self, + content_path: str, + session_id: str, + content_range: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Uploads a file chunk to the image store relative path. + + Uploads a file chunk to the image store with the specified upload session ID and image store + relative path. This API allows user to resume the file upload operation. user doesn't have to + restart the file upload from scratch whenever there is a network interruption. Use this option + if the file size is large. + + To perform a resumable file upload, user need to break the file into multiple chunks and upload + these chunks to the image store one-by-one. Chunks don't have to be uploaded in order. If the + file represented by the image store relative path already exists, it will be overwritten when + the upload session commits. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. 
+ :type session_id: str + :param content_range: When uploading file chunks to the image store, the Content-Range header + field need to be configured and sent with a request. The format should looks like "bytes + {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For example, Content-Range:bytes + 300-5000/20000 indicates that user is sending bytes 300 through 5,000 and the total file length + is 20,000 bytes. + :type content_range: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.upload_file_chunk.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Range'] = self._serialize.header("content_range", 
content_range, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} # type: ignore + + async def get_image_store_root_folder_size( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.FolderSizeInfo": + """Get the folder size at the root of the image store. + + Returns the total size of files at the root and children folders in image store. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FolderSizeInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.FolderSizeInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_root_folder_size.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('FolderSizeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_root_folder_size.metadata = {'url': '/ImageStore/$/FolderSize'} # type: ignore + + async def get_image_store_folder_size( + self, + content_path: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.FolderSizeInfo": + 
"""Get the size of a folder in image store. + + Gets the total size of file under a image store folder, specified by contentPath. The + contentPath is relative to the root of the image store. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FolderSizeInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.FolderSizeInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_folder_size.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('FolderSizeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_folder_size.metadata = {'url': '/ImageStore/{contentPath}/$/FolderSize'} # type: ignore + + async def get_image_store_info( + self, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.ImageStoreInfo": + """Gets the overall ImageStore information. + + Returns information about the primary ImageStore replica, such as disk capacity and available + disk space at the node it is on, and several categories of the ImageStore's file system usage. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ImageStoreInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ImageStoreInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_info.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ImageStoreInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_info.metadata = {'url': '/ImageStore/$/Info'} # type: ignore + + async def invoke_infrastructure_command( + self, + command: str, + service_id: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> IO: + """Invokes an 
administrative command on the given Infrastructure Service instance. + + For clusters that have one or more instances of the Infrastructure Service configured, + this API provides a way to send infrastructure-specific commands to a particular + instance of the Infrastructure Service. + + Available commands and their corresponding response formats vary depending upon + the infrastructure on which the cluster is running. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param command: The text of the command to be invoked. The content of the command is + infrastructure-specific. + :type command: str + :param service_id: The identity of the infrastructure service. This is the full name of the + infrastructure service without the 'fabric:' URI scheme. This parameter required only for the + cluster that has more than one instance of infrastructure service running. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.invoke_infrastructure_command.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Command'] = self._serialize.query("command", command, 'str') + if service_id is not None: + query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} # type: ignore + + async def 
invoke_infrastructure_query( + self, + command: str, + service_id: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> IO: + """Invokes a read-only query on the given infrastructure service instance. + + For clusters that have one or more instances of the Infrastructure Service configured, + this API provides a way to send infrastructure-specific queries to a particular + instance of the Infrastructure Service. + + Available commands and their corresponding response formats vary depending upon + the infrastructure on which the cluster is running. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param command: The text of the command to be invoked. The content of the command is + infrastructure-specific. + :type command: str + :param service_id: The identity of the infrastructure service. This is the full name of the + infrastructure service without the 'fabric:' URI scheme. This parameter required only for the + cluster that has more than one instance of infrastructure service running. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.invoke_infrastructure_query.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Command'] = self._serialize.query("command", command, 'str') + if service_id is not None: + query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} # type: ignore + + async def start_data_loss( + self, + 
service_id: str, + partition_id: str, + operation_id: str, + data_loss_mode: Union[str, "_models.DataLossMode"], + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """This API will induce data loss for the specified partition. It will trigger a call to the OnDataLossAsync API of the partition. + + This API will induce data loss for the specified partition. It will trigger a call to the + OnDataLoss API of the partition. + Actual data loss will depend on the specified DataLossMode. + + + * PartialDataLoss - Only a quorum of replicas are removed and OnDataLoss is triggered for the + partition but actual data loss depends on the presence of in-flight replication. + * FullDataLoss - All replicas are removed hence all data is lost and OnDataLoss is triggered. + + This API should only be called with a stateful service as the target. + + Calling this API with a system service as the target is not advised. + + Note: Once this API has been called, it cannot be reversed. Calling CancelOperation will only + stop execution and clean up internal system state. + It will not restore data if the command has progressed far enough to cause data loss. + + Call the GetDataLossProgress API with the same OperationId to return information on the + operation started with this API. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. 
+ :type operation_id: str + :param data_loss_mode: This enum is passed to the StartDataLoss API to indicate what type of + data loss to induce. + :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_data_loss.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} # type: ignore + + async def get_data_loss_progress( + self, + service_id: str, + partition_id: str, + operation_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PartitionDataLossProgress": + """Gets the progress of a partition data loss operation started using the StartDataLoss API. + + Gets the progress of a data loss operation started with StartDataLoss, using the OperationId. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionDataLossProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionDataLossProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionDataLossProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_data_loss_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('PartitionDataLossProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} # type: ignore + + async def start_quorum_loss( + self, + service_id: str, + partition_id: str, + operation_id: str, + quorum_loss_mode: Union[str, "_models.QuorumLossMode"], + quorum_loss_duration: int, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Induces quorum loss for a given stateful service partition. + + This API is useful for a temporary quorum loss situation on your service. + + Call the GetQuorumLossProgress API with the same OperationId to return information on the + operation started with this API. + + This can only be called on stateful persisted (HasPersistedState==true) services. Do not use + this API on stateless services or stateful in-memory only services. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param quorum_loss_mode: This enum is passed to the StartQuorumLoss API to indicate what type + of quorum loss to induce. + :type quorum_loss_mode: str or ~azure.servicefabric.models.QuorumLossMode + :param quorum_loss_duration: The amount of time for which the partition will be kept in quorum + loss. This must be specified in seconds. 
+ :type quorum_loss_duration: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_quorum_loss.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str') + query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} # type: ignore + + async def get_quorum_loss_progress( + self, + service_id: str, + partition_id: str, + operation_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PartitionQuorumLossProgress": + """Gets the progress of a quorum loss operation on a partition started using the StartQuorumLoss API. + + Gets the progress of a quorum loss operation started with StartQuorumLoss, using the provided + OperationId. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionQuorumLossProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionQuorumLossProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_quorum_loss_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('PartitionQuorumLossProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} # type: ignore + + async def start_partition_restart( + self, + service_id: str, + partition_id: str, + operation_id: str, + restart_partition_mode: Union[str, "_models.RestartPartitionMode"], + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """This API will restart some or all replicas or instances of the specified partition. + + This API is useful for testing failover. + + If used to target a stateless service partition, RestartPartitionMode must be + AllReplicasOrInstances. + + Call the GetPartitionRestartProgress API using the same OperationId to get the progress. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param restart_partition_mode: Describe which partitions to restart. + :type restart_partition_mode: str or ~azure.servicefabric.models.RestartPartitionMode + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_partition_restart.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, 
model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} # type: ignore + + async def get_partition_restart_progress( + self, + service_id: str, + partition_id: str, + operation_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PartitionRestartProgress": + """Gets the progress of a PartitionRestart operation started using StartPartitionRestart. + + Gets the progress of a PartitionRestart started with StartPartitionRestart using the provided + OperationId. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionRestartProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionRestartProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionRestartProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_restart_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('PartitionRestartProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} # type: ignore + + async def start_node_transition( + self, + node_name: str, + operation_id: str, + node_transition_type: Union[str, "_models.NodeTransitionType"], + node_instance_id: str, + stop_duration_in_seconds: int, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Starts or stops a cluster node. + + Starts or stops a cluster node. A cluster node is a process, not the OS instance itself. To + start a node, pass in "Start" for the NodeTransitionType parameter. + To stop a node, pass in "Stop" for the NodeTransitionType parameter. This API starts the + operation - when the API returns the node may not have finished transitioning yet. + Call GetNodeTransitionProgress with the same OperationId to get the progress of the operation. + + :param node_name: The name of the node. + :type node_name: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param node_transition_type: Indicates the type of transition to perform. + NodeTransitionType.Start will start a stopped node. NodeTransitionType.Stop will stop a node + that is up. + :type node_transition_type: str or ~azure.servicefabric.models.NodeTransitionType + :param node_instance_id: The node instance ID of the target node. This can be determined + through GetNodeInfo API. + :type node_instance_id: str + :param stop_duration_in_seconds: The duration, in seconds, to keep the node stopped. The + minimum value is 600, the maximum is 14400. After this time expires, the node will + automatically come back up. 
+ :type stop_duration_in_seconds: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_node_transition.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str') + query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str') + query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, 
query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} # type: ignore + + async def get_node_transition_progress( + self, + node_name: str, + operation_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.NodeTransitionProgress": + """Gets the progress of an operation started using StartNodeTransition. + + Gets the progress of an operation started with StartNodeTransition using the provided + OperationId. + + :param node_name: The name of the node. + :type node_name: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeTransitionProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeTransitionProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeTransitionProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_transition_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('NodeTransitionProgress', pipeline_response) + + if cls: + return cls(pipeline_response, 
deserialized, {}) + + return deserialized + get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} # type: ignore + + async def get_fault_operation_list( + self, + type_filter: int = 65535, + state_filter: int = 65535, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.OperationStatus"]: + """Gets a list of user-induced fault operations filtered by provided input. + + Gets the list of user-induced fault operations filtered by provided input. + + :param type_filter: Used to filter on OperationType for user-induced operations. + + + * 65535 - select all + * 1 - select PartitionDataLoss. + * 2 - select PartitionQuorumLoss. + * 4 - select PartitionRestart. + * 8 - select NodeTransition. + :type type_filter: int + :param state_filter: Used to filter on OperationState's for user-induced operations. + + + * 65535 - select All + * 1 - select Running + * 2 - select RollingBack + * 8 - select Completed + * 16 - select Faulted + * 32 - select Cancelled + * 64 - select ForceCancelled. + :type state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of OperationStatus, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.OperationStatus] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.OperationStatus"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_fault_operation_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int') + query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[OperationStatus]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_fault_operation_list.metadata = 
{'url': '/Faults/'} # type: ignore + + async def cancel_operation( + self, + operation_id: str, + force: bool = False, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Cancels a user-induced fault operation. + + The following APIs start fault operations that may be cancelled by using CancelOperation: + StartDataLoss, StartQuorumLoss, StartPartitionRestart, StartNodeTransition. + + If force is false, then the specified user-induced operation will be gracefully stopped and + cleaned up. If force is true, the command will be aborted, and some internal state + may be left behind. Specifying force as true should be used with care. Calling this API with + force set to true is not allowed until this API has already + been called on the same test command with force set to false first, or unless the test command + already has an OperationState of OperationState.RollingBack. + Clarification: OperationState.RollingBack means that the system will be/is cleaning up internal + system state caused by executing the command. It will not restore data if the + test command was to cause data loss. For example, if you call StartDataLoss then call this + API, the system will only clean up internal state from running the command. + It will not restore the target partition's data, if the command progressed far enough to cause + data loss. + + Important note: if this API is invoked with force==true, internal state may be left behind. + + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param force: Indicates whether to gracefully roll back and clean up internal system state + modified by executing the user-induced operation. + :type force: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.cancel_operation.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['Force'] = self._serialize.query("force", force, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + cancel_operation.metadata = {'url': '/Faults/$/Cancel'} # type: ignore + + async def create_backup_policy( + self, + backup_policy_description: "_models.BackupPolicyDescription", + timeout: Optional[int] = 60, + 
validate_connection: Optional[bool] = False, + **kwargs + ) -> None: + """Creates a backup policy. + + Creates a backup policy which can be associated later with a Service Fabric application, + service or a partition for periodic backup. + + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param validate_connection: Specifies whether to validate the storage connection and + credentials before creating or updating the backup policies. + :type validate_connection: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_backup_policy.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if validate_connection is not None: + query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, 
Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} # type: ignore + + async def delete_backup_policy( + self, + backup_policy_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes the backup policy. + + Deletes an existing backup policy. A backup policy must be created before it can be deleted. A + currently active backup policy, associated with any Service Fabric application, service or + partition, cannot be deleted without first deleting the mapping. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_backup_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} # type: ignore + + async def get_backup_policy_list( + self, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] 
= 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedBackupPolicyDescriptionList": + """Gets all the backup policies configured. + + Get a list of all the backup policies configured. + + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupPolicyDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupPolicyDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_backup_policy_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('PagedBackupPolicyDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} # type: ignore + + async def get_backup_policy_by_name( + self, + backup_policy_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.BackupPolicyDescription": + """Gets a particular backup policy by name. + + Gets a particular backup policy identified by {backupPolicyName}. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BackupPolicyDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.BackupPolicyDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicyDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_backup_policy_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('BackupPolicyDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} # type: ignore + + async def get_all_entities_backed_up_by_policy( + self, + backup_policy_name: str, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedBackupEntityList": + """Gets the list of backup entities that are associated with this policy. + + Returns a list of Service Fabric application, service or partition which are associated with + this backup policy. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupEntityList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupEntityList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupEntityList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_all_entities_backed_up_by_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", 
continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupEntityList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} # type: ignore + + async def update_backup_policy( + self, + backup_policy_name: str, + backup_policy_description: "_models.BackupPolicyDescription", + timeout: Optional[int] = 60, + validate_connection: Optional[bool] = False, + **kwargs + ) -> None: + """Updates the backup policy. + + Updates the backup policy identified by {backupPolicyName}. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param validate_connection: Specifies whether to validate the storage connection and + credentials before creating or updating the backup policies. + :type validate_connection: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_backup_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if validate_connection is not None: + query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = 
self._serialize.body(backup_policy_description, 'BackupPolicyDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} # type: ignore + + async def enable_application_backup( + self, + application_id: str, + backup_policy_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Enables periodic backup of stateful partitions under this Service Fabric application. + + Enables periodic backup of stateful partitions which are part of this Service Fabric + application. Each partition is backed up individually as per the specified backup policy + description. + Note only C# based Reliable Actor and Reliable Stateful services are currently supported for + periodic backup. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.enable_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} # type: ignore + + async def disable_application_backup( + self, + application_id: str, + clean_backup: bool, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Disables periodic backup of Service Fabric application. + + Disables periodic backup of Service Fabric application which was previously enabled. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the + backups which were created for the backup entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _disable_backup_description is not None: + body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} # type: ignore + + async def get_application_backup_configuration_info( + self, + application_id: str, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedBackupConfigurationInfoList": + """Gets the Service Fabric application backup configuration information. + + Gets the Service Fabric backup configuration information for the application and the services + and partitions under this application. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. 
+ This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupConfigurationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_backup_configuration_info.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + 
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} # type: ignore + + async def get_application_backup_list( + self, + application_id: str, + timeout: Optional[int] = 60, + latest: Optional[bool] = False, + start_date_time_filter: Optional[datetime.datetime] = None, + end_date_time_filter: Optional[datetime.datetime] = None, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + **kwargs + ) -> "_models.PagedBackupInfoList": + """Gets the list of backups available for every partition in this application. + + Returns a list of backups available for every partition in this Service Fabric application. The + server enumerates all the backups available at the backup location configured in the backup + policy. 
It also allows filtering of the result based on start and end datetime or just fetching + the latest available backup for every partition. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup available for a partition + for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, all backups from the beginning are enumerated. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specify the end date time till which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, enumeration is done till the most recent backup. + :type end_date_time_filter: ~datetime.datetime + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. 
If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_backup_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + 
query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} # type: ignore + + async def suspend_application_backup( + self, + application_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Suspends periodic backup for the specified Service Fabric application. + + The application which is configured to take periodic backups, is suspended for taking further + backups till it is resumed again. This operation applies to the entire application's hierarchy. 
+ It means all the services and partitions under this application are now suspended for backup. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.suspend_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 
'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} # type: ignore + + async def resume_application_backup( + self, + application_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Resumes periodic backup of a Service Fabric application which was previously suspended. + + The previously suspended Service Fabric application resumes taking periodic backup as per the + backup policy currently configured for the same. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resume_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_application_backup.metadata = {'url': '/Applications/{applicationId}/$/ResumeBackup'} # type: ignore + + async def enable_service_backup( + self, + service_id: str, + backup_policy_name: str, + timeout: Optional[int] = 60, + 
**kwargs + ) -> None: + """Enables periodic backup of stateful partitions under this Service Fabric service. + + Enables periodic backup of stateful partitions which are part of this Service Fabric service. + Each partition is backed up individually as per the specified backup policy description. In + case the application, which the service is part of, is already enabled for backup then this + operation would override the policy being used to take the periodic backup for this service and + its partitions (unless explicitly overridden at the partition level). + Note only C# based Reliable Actor and Reliable Stateful services are currently supported for + periodic backup. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.enable_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} # type: ignore + + async def disable_service_backup( + self, + service_id: str, + clean_backup: bool, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Disables periodic backup of Service Fabric service which was previously enabled. + + Disables periodic backup of Service Fabric service which was previously enabled. Backup must be + explicitly enabled before it can be disabled. + In case the backup is enabled for the Service Fabric application, which this service is part + of, this service would continue to be periodically backed up as per the policy mapped at the + application level. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the + backups which were created for the backup entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _disable_backup_description is not None: + body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} # type: ignore + + async def get_service_backup_configuration_info( + self, + service_id: str, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedBackupConfigurationInfoList": + """Gets the Service Fabric service backup configuration information. + + Gets the Service Fabric backup configuration information for the service and the partitions + under this service. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. 
The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupConfigurationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_backup_configuration_info.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is 
not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_backup_configuration_info.metadata = {'url': '/Services/{serviceId}/$/GetBackupConfigurationInfo'} # type: ignore + + async def get_service_backup_list( + self, + service_id: str, + timeout: Optional[int] = 60, + latest: Optional[bool] = False, + start_date_time_filter: Optional[datetime.datetime] = None, + end_date_time_filter: Optional[datetime.datetime] = None, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + **kwargs + ) -> "_models.PagedBackupInfoList": + """Gets the list of backups available for every partition in this service. + + Returns a list of backups available for every partition in this Service Fabric service. The + server enumerates all the backups available in the backup store configured in the backup + policy. It also allows filtering of the result based on start and end datetime or just fetching + the latest available backup for every partition. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup available for a partition + for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, all backups from the beginning are enumerated. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specify the end date time till which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, enumeration is done till the most recent backup. + :type end_date_time_filter: ~datetime.datetime + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. 
+ This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_backup_list.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if 
continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'} # type: ignore + + async def suspend_service_backup( + self, + service_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Suspends periodic backup for the specified Service Fabric service. + + The service which is configured to take periodic backups, is suspended for taking further + backups till it is resumed again. This operation applies to the entire service's hierarchy. It + means all the partitions under this service are now suspended for backup. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. 
+ For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.suspend_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'} # type: ignore + + async def resume_service_backup( + self, + service_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Resumes periodic backup of a Service Fabric service which was previously suspended. + + The previously suspended Service Fabric service resumes taking periodic backup as per the + backup policy currently configured for the same. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resume_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'} # type: ignore + + async def enable_partition_backup( + self, + partition_id: str, + backup_policy_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + 
"""Enables periodic backup of the stateful persisted partition. + + Enables periodic backup of stateful persisted partition. Each partition is backed up as per the + specified backup policy description. In case the application or service, which is partition is + part of, is already enabled for backup then this operation would override the policy being used + to take the periodic backup of this partition. + Note only C# based Reliable Actor and Reliable Stateful services are currently supported for + periodic backup. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.enable_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + 
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'} # type: ignore + + async def disable_partition_backup( + self, + partition_id: str, + clean_backup: bool, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Disables periodic backup of Service Fabric partition which was previously enabled. + + Disables periodic backup of partition which was previously enabled. Backup must be explicitly + enabled before it can be disabled. + In case the backup is enabled for the Service Fabric application or service, which this + partition is part of, this partition would continue to be periodically backed up as per the + policy mapped at the higher level entity. + + :param partition_id: The identity of the partition. 
+ :type partition_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the + backups which were created for the backup entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = 
{} # type: Dict[str, Any] + if _disable_backup_description is not None: + body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/DisableBackup'} # type: ignore + + async def get_partition_backup_configuration_info( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PartitionBackupConfigurationInfo": + """Gets the partition backup configuration information. + + Gets the Service Fabric Backup configuration information for the specified partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionBackupConfigurationInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionBackupConfigurationInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_backup_configuration_info.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionBackupConfigurationInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + 
return deserialized + get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'} # type: ignore + + async def get_partition_backup_list( + self, + partition_id: str, + timeout: Optional[int] = 60, + latest: Optional[bool] = False, + start_date_time_filter: Optional[datetime.datetime] = None, + end_date_time_filter: Optional[datetime.datetime] = None, + **kwargs + ) -> "_models.PagedBackupInfoList": + """Gets the list of backups available for the specified partition. + + Returns a list of backups available for the specified partition. The server enumerates all the + backups available in the backup store configured in the backup policy. It also allows filtering + of the result based on start and end datetime or just fetching the latest available backup for + the partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup available for a partition + for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, all backups from the beginning are enumerated. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specify the end date time till which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, enumeration is done till the most recent backup. 
+ :type end_date_time_filter: ~datetime.datetime + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_backup_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'} # type: ignore + + async def suspend_partition_backup( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Suspends periodic backup for the specified partition. + + The partition which is configured to take periodic backups, is suspended for taking further + backups till it is resumed again. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.suspend_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'} # type: ignore + + async def resume_partition_backup( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Resumes 
periodic backup of partition which was previously suspended. + + The previously suspended partition resumes taking periodic backup as per the backup policy + currently configured for the same. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resume_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in 
[202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'} # type: ignore + + async def backup_partition( + self, + partition_id: str, + backup_timeout: Optional[int] = 10, + timeout: Optional[int] = 60, + backup_storage: Optional["_models.BackupStorageDescription"] = None, + **kwargs + ) -> None: + """Triggers backup of the partition's state. + + Creates a backup of the stateful persisted partition's state. In case the partition is already + being periodically backed up, then by default the new backup is created at the same backup + storage. One can also override the same by specifying the backup storage details as part of the + request body. Once the backup is initiated, its progress can be tracked using the + GetBackupProgress operation. + In case, the operation times out, specify a greater backup timeout value in the query + parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_timeout: Specifies the maximum amount of time, in minutes, to wait for the backup + operation to complete. Post that, the operation completes with timeout error. However, in + certain corner cases it could be that though the operation returns back timeout, the backup + actually goes through. In case of timeout error, its recommended to invoke this operation again + with a greater timeout value. The default value for the same is 10 minutes. + :type backup_timeout: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param backup_storage: Specifies the details of the backup storage where to save the backup. + :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _backup_partition_description = _models.BackupPartitionDescription(backup_storage=backup_storage) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.backup_partition.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if backup_timeout is not None: + query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _backup_partition_description is not None: + body_content = self._serialize.body(_backup_partition_description, 'BackupPartitionDescription') + else: + 
body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'} # type: ignore + + async def get_partition_backup_progress( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.BackupProgressInfo": + """Gets details for the latest backup triggered for this partition. + + Returns information about the state of the latest backup along with details or failure reason + in case of completion. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BackupProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.BackupProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_backup_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('BackupProgressInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_backup_progress.metadata = {'url': 
'/Partitions/{partitionId}/$/GetBackupProgress'} # type: ignore + + async def restore_partition( + self, + partition_id: str, + restore_partition_description: "_models.RestorePartitionDescription", + restore_timeout: Optional[int] = 10, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Triggers restore of the state of the partition using the specified restore partition description. + + Restores the state of a of the stateful persisted partition using the specified backup point. + In case the partition is already being periodically backed up, then by default the backup point + is looked for in the storage specified in backup policy. One can also override the same by + specifying the backup storage details as part of the restore partition description in body. + Once the restore is initiated, its progress can be tracked using the GetRestoreProgress + operation. + In case, the operation times out, specify a greater restore timeout value in the query + parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param restore_partition_description: Describes the parameters to restore the partition. + :type restore_partition_description: ~azure.servicefabric.models.RestorePartitionDescription + :param restore_timeout: Specifies the maximum amount of time to wait, in minutes, for the + restore operation to complete. Post that, the operation returns back with timeout error. + However, in certain corner cases it could be that the restore operation goes through even + though it completes with timeout. In case of timeout error, its recommended to invoke this + operation again with a greater timeout value. the default value for the same is 10 minutes. + :type restore_timeout: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.restore_partition.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if restore_timeout is not None: + query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'} # type: ignore + + async def get_partition_restore_progress( + self, + partition_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.RestoreProgressInfo": + """Gets details for the latest restore operation triggered for this partition. + + Returns information about the state of the latest restore operation along with details or + failure reason in case of completion. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RestoreProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RestoreProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RestoreProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_restore_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('RestoreProgressInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_restore_progress.metadata = 
{'url': '/Partitions/{partitionId}/$/GetRestoreProgress'} # type: ignore + + async def get_backups_from_backup_location( + self, + get_backup_by_storage_query_description: "_models.GetBackupByStorageQueryDescription", + timeout: Optional[int] = 60, + continuation_token_parameter: Optional[str] = None, + max_results: Optional[int] = 0, + **kwargs + ) -> "_models.PagedBackupInfoList": + """Gets the list of backups available for the specified backed up entity at the specified backup location. + + Gets the list of backups available for the specified backed up entity (Application, Service or + Partition) at the specified backup location (FileShare or Azure Blob Storage). + + :param get_backup_by_storage_query_description: Describes the filters and backup storage + details to be used for enumerating backups. + :type get_backup_by_storage_query_description: ~azure.servicefabric.models.GetBackupByStorageQueryDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. 
The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_backups_from_backup_location.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + 
body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'} # type: ignore + + async def create_name( + self, + name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Creates a Service Fabric name. + + Creates the specified Service Fabric name. + + :param name: The Service Fabric name, including the 'fabric:' URI scheme. + :type name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _name_description = _models.NameDescription(name=name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_name.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_name_description, 'NameDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_name.metadata 
= {'url': '/Names/$/Create'} # type: ignore + + async def get_name_exists_info( + self, + name_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Returns whether the Service Fabric name exists. + + Returns whether the specified Service Fabric name exists. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_name_exists_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + get_name_exists_info.metadata = {'url': '/Names/{nameId}'} # type: ignore + + async def delete_name( + self, + name_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes a Service Fabric name. + + Deletes the specified Service Fabric name. A name must be created before it can be deleted. + Deleting a name with child properties will fail. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_name.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_name.metadata = {'url': '/Names/{nameId}'} # type: ignore + + async def get_sub_name_info_list( + self, + name_id: str, + recursive: Optional[bool] = False, + continuation_token_parameter: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> 
"_models.PagedSubNameInfoList": + """Enumerates all the Service Fabric names under a given name. + + Enumerates all the Service Fabric names under a given name. If the subnames do not fit in a + page, one page of results is returned as well as a continuation token, which can be used to get + the next page. Querying a name that doesn't exist will fail. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param recursive: Allows specifying that the search performed should be recursive. + :type recursive: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedSubNameInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedSubNameInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSubNameInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_sub_name_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if recursive is not None: + query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedSubNameInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'} # type: ignore + + async def get_property_info_list( + self, + name_id: str, + include_values: Optional[bool] = False, + continuation_token_parameter: Optional[str] = None, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PagedPropertyInfoList": + """Gets information on all Service Fabric properties under a given name. + + A Service Fabric name can have one or more named properties that store custom information. This + operation gets the information about these properties in a paged list. The information includes + name, value, and metadata about each of the properties. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param include_values: Allows specifying whether to include the values of the properties + returned. True if values should be returned with the metadata; False to return only property + metadata. + :type include_values: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedPropertyInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedPropertyInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedPropertyInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_property_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if include_values is not None: + query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedPropertyInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'} # type: ignore + + async def put_property( + self, + name_id: str, + property_description: "_models.PropertyDescription", + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Creates or updates a Service Fabric property. + + Creates or updates the specified Service Fabric property under a given name. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param property_description: Describes the Service Fabric property to be created. + :type property_description: ~azure.servicefabric.models.PropertyDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.put_property.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(property_description, 'PropertyDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore + + async def get_property_info( + self, + name_id: str, + property_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> "_models.PropertyInfo": + """Gets the specified Service Fabric property. + + Gets the specified Service Fabric property under a given name. This will always return both + value and metadata. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param property_name: Specifies the name of the property to get. + :type property_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PropertyInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PropertyInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PropertyInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_property_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['PropertyName'] = 
self._serialize.query("property_name", property_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PropertyInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore + + async def delete_property( + self, + name_id: str, + property_name: str, + timeout: Optional[int] = 60, + **kwargs + ) -> None: + """Deletes the specified Service Fabric property. + + Deletes the specified Service Fabric property under a given name. A property must be created + before it can be deleted. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param property_name: Specifies the name of the property to get. + :type property_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_property.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore + + async def submit_property_batch( + self, + name_id: str, + timeout: 
Optional[int] = 60, + operations: Optional[List["_models.PropertyBatchOperation"]] = None, + **kwargs + ) -> Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]: + """Submits a property batch. + + Submits a batch of property operations. Either all or none of the operations will be committed. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param operations: A list of the property batch operations to be executed. + :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SuccessfulPropertyBatchInfo or FailedPropertyBatchInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SuccessfulPropertyBatchInfo or ~azure.servicefabric.models.FailedPropertyBatchInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _property_batch_description_list = _models.PropertyBatchDescriptionList(operations=operations) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.submit_property_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters 
+ query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_property_batch_description_list, 'PropertyBatchDescriptionList') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 409]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if response.status_code == 200: + deserialized = self._deserialize('SuccessfulPropertyBatchInfo', pipeline_response) + + if response.status_code == 409: + deserialized = self._deserialize('FailedPropertyBatchInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'} # type: ignore + + async def get_cluster_event_list( + self, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ClusterEvent"]: + """Gets all 
Cluster-related events. + + The response is list of ClusterEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ClusterEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ClusterEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ClusterEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ClusterEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_event_list.metadata = {'url': '/EventsStore/Cluster/Events'} # type: ignore + + async def get_containers_event_list( + self, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ContainerInstanceEvent"]: + """Gets all Containers-related events. + + The response is list of ContainerInstanceEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. 
otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ContainerInstanceEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ContainerInstanceEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_containers_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ContainerInstanceEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'} # type: ignore + + async def get_node_event_list( + self, + node_name: str, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.NodeEvent"]: + """Gets a Node-related events. + + The response is list of NodeEvent objects. + + :param node_name: The name of the node. + :type node_name: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of NodeEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.NodeEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[NodeEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'} # type: ignore + + async def get_nodes_event_list( + self, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.NodeEvent"]: + """Gets all Nodes-related Events. + + The response is list of NodeEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of NodeEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.NodeEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_nodes_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = 
self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[NodeEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_nodes_event_list.metadata = {'url': '/EventsStore/Nodes/Events'} # type: ignore + + async def get_application_event_list( + self, + application_id: str, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ApplicationEvent"]: + """Gets an Application-related events. + + The response is list of ApplicationEvent objects. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
+ :type application_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma-separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. Otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ApplicationEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ApplicationEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'} # type: ignore + + async def get_applications_event_list( + self, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ApplicationEvent"]: + """Gets all Applications-related events. + + The response is list of ApplicationEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. 
+ :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ApplicationEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ApplicationEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_applications_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'} # type: ignore + + async def get_service_event_list( + self, + service_id: str, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ServiceEvent"]: + """Gets a Service-related events. + + The response is list of ServiceEvent objects. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
+ :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ServiceEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ServiceEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') 
+ if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ServiceEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'} # type: ignore + + async def get_services_event_list( + self, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> 
List["_models.ServiceEvent"]: + """Gets all Services-related events. + + The response is a list of ServiceEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma-separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. Otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ServiceEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ServiceEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_services_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ServiceEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} # type: ignore + + async def get_partition_event_list( + self, + partition_id: str, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.PartitionEvent"]: + """Gets a Partition-related events. + + The response is list of PartitionEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of PartitionEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.PartitionEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is 
not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[PartitionEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} # type: ignore + + async def get_partitions_event_list( + self, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.PartitionEvent"]: + """Gets all Partitions-related events. + + The response is list of PartitionEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of PartitionEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.PartitionEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partitions_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + 
query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[PartitionEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} # type: ignore + + async def get_partition_replica_event_list( + self, + partition_id: str, + replica_id: str, + start_time_utc: str, + end_time_utc: str, + timeout: Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ReplicaEvent"]: + """Gets a Partition Replica-related events. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
+ :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ReplicaEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ReplicaEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_replica_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: 
Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} # type: ignore + + async def get_partition_replicas_event_list( + self, + partition_id: str, + start_time_utc: str, + end_time_utc: str, + timeout: 
Optional[int] = 60, + events_types_filter: Optional[str] = None, + exclude_analysis_events: Optional[bool] = None, + skip_correlation_lookup: Optional[bool] = None, + **kwargs + ) -> List["_models.ReplicaEvent"]: + """Gets all Replicas-related events for a Partition. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ReplicaEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ReplicaEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_replicas_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} # type: ignore + + async def get_correlated_event_list( + self, + event_instance_id: str, + timeout: Optional[int] = 60, + **kwargs + ) -> List["_models.FabricEvent"]: + """Gets all correlated events for a given event. + + The response is list of FabricEvents. + + :param event_instance_id: The EventInstanceId. + :type event_instance_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of FabricEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.FabricEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_correlated_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[FabricEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_correlated_event_list.metadata = {'url': 
'/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py index 4b8b0c07a61e..f14afc288e10 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py @@ -1,12 +1,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- try: @@ -47,7 +44,6 @@ from ._models_py3 import ApplicationScopedVolume from ._models_py3 import ApplicationScopedVolumeCreationParameters from ._models_py3 import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk - from ._models_py3 import ApplicationsHealthEvaluation from ._models_py3 import ApplicationTypeApplicationsHealthEvaluation from ._models_py3 import ApplicationTypeHealthPolicyMapItem from ._models_py3 import ApplicationTypeImageStorePath @@ -61,6 +57,7 @@ from ._models_py3 import ApplicationUpgradeRollbackStartedEvent from ._models_py3 import ApplicationUpgradeStartedEvent from ._models_py3 import ApplicationUpgradeUpdateDescription + from ._models_py3 import ApplicationsHealthEvaluation from ._models_py3 import AutoScalingMechanism from ._models_py3 import AutoScalingMetric from ._models_py3 import AutoScalingPolicy @@ -86,8 +83,8 @@ from ._models_py3 import ChaosCodePackageRestartScheduledEvent from ._models_py3 import ChaosContext from ._models_py3 import ChaosEvent - from ._models_py3 import ChaosEventsSegment from ._models_py3 import ChaosEventWrapper + from ._models_py3 import ChaosEventsSegment from ._models_py3 import ChaosNodeRestartScheduledEvent from ._models_py3 import ChaosParameters from ._models_py3 import ChaosParametersDictionaryItem @@ -149,6 +146,7 @@ from ._models_py3 import DefaultExecutionPolicy from ._models_py3 import DeletePropertyBatchOperation from ._models_py3 import DeltaNodesCheckHealthEvaluation + from ._models_py3 import DeployServicePackageToNodeDescription from ._models_py3 import DeployedApplicationHealth from ._models_py3 import DeployedApplicationHealthEvaluation from ._models_py3 import DeployedApplicationHealthReportExpiredEvent @@ -177,7 +175,6 @@ from ._models_py3 import DeployedStatefulServiceReplicaInfo from ._models_py3 import DeployedStatelessServiceInstanceDetailInfo from ._models_py3 import 
DeployedStatelessServiceInstanceInfo - from ._models_py3 import DeployServicePackageToNodeDescription from ._models_py3 import DiagnosticsDescription from ._models_py3 import DiagnosticsRef from ._models_py3 import DiagnosticsSinkProperties @@ -203,7 +200,7 @@ from ._models_py3 import ExternalStoreProvisionApplicationTypeDescription from ._models_py3 import FabricCodeVersionInfo from ._models_py3 import FabricConfigVersionInfo - from ._models_py3 import FabricError, FabricErrorException + from ._models_py3 import FabricError from ._models_py3 import FabricErrorError from ._models_py3 import FabricEvent from ._models_py3 import FailedPropertyBatchInfo @@ -239,6 +236,7 @@ from ._models_py3 import ImageStoreCopyDescription from ._models_py3 import ImageStoreInfo from ._models_py3 import InlinedValueSecretResourceProperties + from ._models_py3 import InstanceLifecycleDescription from ._models_py3 import Int64PropertyValue from ._models_py3 import Int64RangePartitionInformation from ._models_py3 import InvokeDataLossResult @@ -247,9 +245,13 @@ from ._models_py3 import LoadMetricInformation from ._models_py3 import LoadMetricReport from ._models_py3 import LoadMetricReportInfo + from ._models_py3 import LoadedPartitionInformationQueryDescription + from ._models_py3 import LoadedPartitionInformationResult + from ._models_py3 import LoadedPartitionInformationResultList from ._models_py3 import LocalNetworkResourceProperties from ._models_py3 import ManagedApplicationIdentity from ._models_py3 import ManagedApplicationIdentityDescription + from ._models_py3 import ManagedIdentityAzureBlobBackupStorageDescription from ._models_py3 import MetricLoadDescription from ._models_py3 import MonitoringPolicyDescription from ._models_py3 import NameDescription @@ -288,11 +290,14 @@ from ._models_py3 import NodeRepairImpactDescription from ._models_py3 import NodeRepairTargetDescription from ._models_py3 import NodeResult - from ._models_py3 import NodesHealthEvaluation + from 
._models_py3 import NodeTagsDescription from ._models_py3 import NodeTransitionProgress from ._models_py3 import NodeTransitionResult + from ._models_py3 import NodeTypeHealthPolicyMapItem + from ._models_py3 import NodeTypeNodesHealthEvaluation from ._models_py3 import NodeUpEvent from ._models_py3 import NodeUpgradeProgressInfo + from ._models_py3 import NodesHealthEvaluation from ._models_py3 import OperationStatus from ._models_py3 import PackageSharingPolicyInfo from ._models_py3 import PagedApplicationInfoList @@ -382,9 +387,10 @@ from ._models_py3 import ReplicaHealthStateChunkList from ._models_py3 import ReplicaHealthStateFilter from ._models_py3 import ReplicaInfo + from ._models_py3 import ReplicaLifecycleDescription from ._models_py3 import ReplicaMetricLoadDescription - from ._models_py3 import ReplicasHealthEvaluation from ._models_py3 import ReplicaStatusBase + from ._models_py3 import ReplicasHealthEvaluation from ._models_py3 import ReplicatorQueueStatus from ._models_py3 import ReplicatorStatus from ._models_py3 import ResolvedServiceEndpoint @@ -416,6 +422,7 @@ from ._models_py3 import SecretValue from ._models_py3 import SecretValueProperties from ._models_py3 import SecretValueResourceDescription + from ._models_py3 import SecretValueResourceProperties from ._models_py3 import SeedNodeSafetyCheck from ._models_py3 import SelectedPartition from ._models_py3 import ServiceBackupConfigurationInfo @@ -439,17 +446,18 @@ from ._models_py3 import ServiceNameInfo from ._models_py3 import ServiceNewHealthReportEvent from ._models_py3 import ServicePartitionInfo + from ._models_py3 import ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription from ._models_py3 import ServicePlacementInvalidDomainPolicyDescription from ._models_py3 import ServicePlacementNonPartiallyPlaceServicePolicyDescription from ._models_py3 import ServicePlacementPolicyDescription from ._models_py3 import ServicePlacementPreferPrimaryDomainPolicyDescription - from 
._models_py3 import ServicePlacementRequiredDomainPolicyDescription from ._models_py3 import ServicePlacementRequireDomainDistributionPolicyDescription + from ._models_py3 import ServicePlacementRequiredDomainPolicyDescription from ._models_py3 import ServiceProperties from ._models_py3 import ServiceReplicaDescription from ._models_py3 import ServiceReplicaProperties from ._models_py3 import ServiceResourceDescription - from ._models_py3 import ServicesHealthEvaluation + from ._models_py3 import ServiceResourceProperties from ._models_py3 import ServiceTypeDescription from ._models_py3 import ServiceTypeExtensionDescription from ._models_py3 import ServiceTypeHealthPolicy @@ -458,6 +466,7 @@ from ._models_py3 import ServiceTypeManifest from ._models_py3 import ServiceUpdateDescription from ._models_py3 import ServiceUpgradeProgress + from ._models_py3 import ServicesHealthEvaluation from ._models_py3 import Setting from ._models_py3 import SingletonPartitionInformation from ._models_py3 import SingletonPartitionSchemeDescription @@ -517,513 +526,526 @@ from ._models_py3 import WaitForReconfigurationSafetyCheck from ._models_py3 import WaitingChaosEvent except (SyntaxError, ImportError): - from ._models import AadMetadata - from ._models import AadMetadataObject - from ._models import AddRemoveIncrementalNamedPartitionScalingMechanism - from ._models import AddRemoveReplicaScalingMechanism - from ._models import AnalysisEventMetadata - from ._models import ApplicationBackupConfigurationInfo - from ._models import ApplicationBackupEntity - from ._models import ApplicationCapacityDescription - from ._models import ApplicationContainerInstanceExitedEvent - from ._models import ApplicationCreatedEvent - from ._models import ApplicationDeletedEvent - from ._models import ApplicationDescription - from ._models import ApplicationEvent - from ._models import ApplicationHealth - from ._models import ApplicationHealthEvaluation - from ._models import 
ApplicationHealthPolicies - from ._models import ApplicationHealthPolicy - from ._models import ApplicationHealthPolicyMapItem - from ._models import ApplicationHealthPolicyMapObject - from ._models import ApplicationHealthReportExpiredEvent - from ._models import ApplicationHealthState - from ._models import ApplicationHealthStateChunk - from ._models import ApplicationHealthStateChunkList - from ._models import ApplicationHealthStateFilter - from ._models import ApplicationInfo - from ._models import ApplicationLoadInfo - from ._models import ApplicationLoadMetricInformation - from ._models import ApplicationMetricDescription - from ._models import ApplicationNameInfo - from ._models import ApplicationNewHealthReportEvent - from ._models import ApplicationParameter - from ._models import ApplicationProcessExitedEvent - from ._models import ApplicationResourceDescription - from ._models import ApplicationResourceUpgradeProgressInfo - from ._models import ApplicationScopedVolume - from ._models import ApplicationScopedVolumeCreationParameters - from ._models import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk - from ._models import ApplicationsHealthEvaluation - from ._models import ApplicationTypeApplicationsHealthEvaluation - from ._models import ApplicationTypeHealthPolicyMapItem - from ._models import ApplicationTypeImageStorePath - from ._models import ApplicationTypeInfo - from ._models import ApplicationTypeManifest - from ._models import ApplicationUpgradeCompletedEvent - from ._models import ApplicationUpgradeDescription - from ._models import ApplicationUpgradeDomainCompletedEvent - from ._models import ApplicationUpgradeProgressInfo - from ._models import ApplicationUpgradeRollbackCompletedEvent - from ._models import ApplicationUpgradeRollbackStartedEvent - from ._models import ApplicationUpgradeStartedEvent - from ._models import ApplicationUpgradeUpdateDescription - from ._models import AutoScalingMechanism - from ._models import 
AutoScalingMetric - from ._models import AutoScalingPolicy - from ._models import AutoScalingResourceMetric - from ._models import AutoScalingTrigger - from ._models import AverageLoadScalingTrigger - from ._models import AveragePartitionLoadScalingTrigger - from ._models import AverageServiceLoadScalingTrigger - from ._models import AzureBlobBackupStorageDescription - from ._models import AzureInternalMonitoringPipelineSinkDescription - from ._models import BackupConfigurationInfo - from ._models import BackupEntity - from ._models import BackupInfo - from ._models import BackupPartitionDescription - from ._models import BackupPolicyDescription - from ._models import BackupProgressInfo - from ._models import BackupScheduleDescription - from ._models import BackupStorageDescription - from ._models import BackupSuspensionInfo - from ._models import BasicRetentionPolicyDescription - from ._models import BinaryPropertyValue - from ._models import Chaos - from ._models import ChaosCodePackageRestartScheduledEvent - from ._models import ChaosContext - from ._models import ChaosEvent - from ._models import ChaosEventsSegment - from ._models import ChaosEventWrapper - from ._models import ChaosNodeRestartScheduledEvent - from ._models import ChaosParameters - from ._models import ChaosParametersDictionaryItem - from ._models import ChaosPartitionPrimaryMoveScheduledEvent - from ._models import ChaosPartitionSecondaryMoveScheduledEvent - from ._models import ChaosReplicaRemovalScheduledEvent - from ._models import ChaosReplicaRestartScheduledEvent - from ._models import ChaosSchedule - from ._models import ChaosScheduleDescription - from ._models import ChaosScheduleJob - from ._models import ChaosScheduleJobActiveDaysOfWeek - from ._models import ChaosStartedEvent - from ._models import ChaosStoppedEvent - from ._models import ChaosTargetFilter - from ._models import CheckExistsPropertyBatchOperation - from ._models import CheckSequencePropertyBatchOperation - from 
._models import CheckValuePropertyBatchOperation - from ._models import ClusterConfiguration - from ._models import ClusterConfigurationUpgradeDescription - from ._models import ClusterConfigurationUpgradeStatusInfo - from ._models import ClusterEvent - from ._models import ClusterHealth - from ._models import ClusterHealthChunk - from ._models import ClusterHealthChunkQueryDescription - from ._models import ClusterHealthPolicies - from ._models import ClusterHealthPolicy - from ._models import ClusterHealthReportExpiredEvent - from ._models import ClusterLoadInfo - from ._models import ClusterManifest - from ._models import ClusterNewHealthReportEvent - from ._models import ClusterUpgradeCompletedEvent - from ._models import ClusterUpgradeDescriptionObject - from ._models import ClusterUpgradeDomainCompletedEvent - from ._models import ClusterUpgradeHealthPolicyObject - from ._models import ClusterUpgradeProgressObject - from ._models import ClusterUpgradeRollbackCompletedEvent - from ._models import ClusterUpgradeRollbackStartedEvent - from ._models import ClusterUpgradeStartedEvent - from ._models import ClusterVersion - from ._models import CodePackageEntryPoint - from ._models import CodePackageEntryPointStatistics - from ._models import ComposeDeploymentStatusInfo - from ._models import ComposeDeploymentUpgradeDescription - from ._models import ComposeDeploymentUpgradeProgressInfo - from ._models import ConfigParameterOverride - from ._models import ContainerApiRequestBody - from ._models import ContainerApiResponse - from ._models import ContainerApiResult - from ._models import ContainerCodePackageProperties - from ._models import ContainerEvent - from ._models import ContainerInstanceEvent - from ._models import ContainerInstanceView - from ._models import ContainerLabel - from ._models import ContainerLogs - from ._models import ContainerState - from ._models import CreateComposeDeploymentDescription - from ._models import CurrentUpgradeDomainProgressInfo 
- from ._models import DeactivationIntentDescription - from ._models import DefaultExecutionPolicy - from ._models import DeletePropertyBatchOperation - from ._models import DeltaNodesCheckHealthEvaluation - from ._models import DeployedApplicationHealth - from ._models import DeployedApplicationHealthEvaluation - from ._models import DeployedApplicationHealthReportExpiredEvent - from ._models import DeployedApplicationHealthState - from ._models import DeployedApplicationHealthStateChunk - from ._models import DeployedApplicationHealthStateChunkList - from ._models import DeployedApplicationHealthStateFilter - from ._models import DeployedApplicationInfo - from ._models import DeployedApplicationNewHealthReportEvent - from ._models import DeployedApplicationsHealthEvaluation - from ._models import DeployedCodePackageInfo - from ._models import DeployedServicePackageHealth - from ._models import DeployedServicePackageHealthEvaluation - from ._models import DeployedServicePackageHealthReportExpiredEvent - from ._models import DeployedServicePackageHealthState - from ._models import DeployedServicePackageHealthStateChunk - from ._models import DeployedServicePackageHealthStateChunkList - from ._models import DeployedServicePackageHealthStateFilter - from ._models import DeployedServicePackageInfo - from ._models import DeployedServicePackageNewHealthReportEvent - from ._models import DeployedServicePackagesHealthEvaluation - from ._models import DeployedServiceReplicaDetailInfo - from ._models import DeployedServiceReplicaInfo - from ._models import DeployedServiceTypeInfo - from ._models import DeployedStatefulServiceReplicaDetailInfo - from ._models import DeployedStatefulServiceReplicaInfo - from ._models import DeployedStatelessServiceInstanceDetailInfo - from ._models import DeployedStatelessServiceInstanceInfo - from ._models import DeployServicePackageToNodeDescription - from ._models import DiagnosticsDescription - from ._models import DiagnosticsRef - from 
._models import DiagnosticsSinkProperties - from ._models import DisableBackupDescription - from ._models import DiskInfo - from ._models import DoublePropertyValue - from ._models import DsmsAzureBlobBackupStorageDescription - from ._models import EnableBackupDescription - from ._models import EndpointProperties - from ._models import EndpointRef - from ._models import EnsureAvailabilitySafetyCheck - from ._models import EnsurePartitionQuorumSafetyCheck - from ._models import EntityHealth - from ._models import EntityHealthState - from ._models import EntityHealthStateChunk - from ._models import EntityHealthStateChunkList - from ._models import EntityKindHealthStateCount - from ._models import EnvironmentVariable - from ._models import Epoch - from ._models import EventHealthEvaluation - from ._models import ExecutingFaultsChaosEvent - from ._models import ExecutionPolicy - from ._models import ExternalStoreProvisionApplicationTypeDescription - from ._models import FabricCodeVersionInfo - from ._models import FabricConfigVersionInfo - from ._models import FabricError, FabricErrorException - from ._models import FabricErrorError - from ._models import FabricEvent - from ._models import FailedPropertyBatchInfo - from ._models import FailedUpgradeDomainProgressObject - from ._models import FailureUpgradeDomainProgressInfo - from ._models import FileInfo - from ._models import FileShareBackupStorageDescription - from ._models import FileVersion - from ._models import FolderInfo - from ._models import FolderSizeInfo - from ._models import FrequencyBasedBackupScheduleDescription - from ._models import GatewayDestination - from ._models import GatewayResourceDescription - from ._models import GetBackupByStorageQueryDescription - from ._models import GetPropertyBatchOperation - from ._models import GuidPropertyValue - from ._models import HealthEvaluation - from ._models import HealthEvaluationWrapper - from ._models import HealthEvent - from ._models import 
HealthInformation - from ._models import HealthStateCount - from ._models import HealthStatistics - from ._models import HttpConfig - from ._models import HttpHostConfig - from ._models import HttpRouteConfig - from ._models import HttpRouteMatchHeader - from ._models import HttpRouteMatchPath - from ._models import HttpRouteMatchRule - from ._models import IdentityDescription - from ._models import IdentityItemDescription - from ._models import ImageRegistryCredential - from ._models import ImageStoreContent - from ._models import ImageStoreCopyDescription - from ._models import ImageStoreInfo - from ._models import InlinedValueSecretResourceProperties - from ._models import Int64PropertyValue - from ._models import Int64RangePartitionInformation - from ._models import InvokeDataLossResult - from ._models import InvokeQuorumLossResult - from ._models import KeyValueStoreReplicaStatus - from ._models import LoadMetricInformation - from ._models import LoadMetricReport - from ._models import LoadMetricReportInfo - from ._models import LocalNetworkResourceProperties - from ._models import ManagedApplicationIdentity - from ._models import ManagedApplicationIdentityDescription - from ._models import MetricLoadDescription - from ._models import MonitoringPolicyDescription - from ._models import NameDescription - from ._models import NamedPartitionInformation - from ._models import NamedPartitionSchemeDescription - from ._models import NetworkRef - from ._models import NetworkResourceDescription - from ._models import NetworkResourceProperties - from ._models import NetworkResourcePropertiesBase - from ._models import NodeAbortedEvent - from ._models import NodeAddedToClusterEvent - from ._models import NodeClosedEvent - from ._models import NodeDeactivateCompletedEvent - from ._models import NodeDeactivateStartedEvent - from ._models import NodeDeactivationInfo - from ._models import NodeDeactivationTask - from ._models import NodeDeactivationTaskId - from ._models 
import NodeDownEvent - from ._models import NodeEvent - from ._models import NodeHealth - from ._models import NodeHealthEvaluation - from ._models import NodeHealthReportExpiredEvent - from ._models import NodeHealthState - from ._models import NodeHealthStateChunk - from ._models import NodeHealthStateChunkList - from ._models import NodeHealthStateFilter - from ._models import NodeId - from ._models import NodeImpact - from ._models import NodeInfo - from ._models import NodeLoadInfo - from ._models import NodeLoadMetricInformation - from ._models import NodeNewHealthReportEvent - from ._models import NodeOpenFailedEvent - from ._models import NodeOpenSucceededEvent - from ._models import NodeRemovedFromClusterEvent - from ._models import NodeRepairImpactDescription - from ._models import NodeRepairTargetDescription - from ._models import NodeResult - from ._models import NodesHealthEvaluation - from ._models import NodeTransitionProgress - from ._models import NodeTransitionResult - from ._models import NodeUpEvent - from ._models import NodeUpgradeProgressInfo - from ._models import OperationStatus - from ._models import PackageSharingPolicyInfo - from ._models import PagedApplicationInfoList - from ._models import PagedApplicationResourceDescriptionList - from ._models import PagedApplicationTypeInfoList - from ._models import PagedBackupConfigurationInfoList - from ._models import PagedBackupEntityList - from ._models import PagedBackupInfoList - from ._models import PagedBackupPolicyDescriptionList - from ._models import PagedComposeDeploymentStatusInfoList - from ._models import PagedDeployedApplicationInfoList - from ._models import PagedGatewayResourceDescriptionList - from ._models import PagedNetworkResourceDescriptionList - from ._models import PagedNodeInfoList - from ._models import PagedPropertyInfoList - from ._models import PagedReplicaInfoList - from ._models import PagedSecretResourceDescriptionList - from ._models import 
PagedSecretValueResourceDescriptionList - from ._models import PagedServiceInfoList - from ._models import PagedServicePartitionInfoList - from ._models import PagedServiceReplicaDescriptionList - from ._models import PagedServiceResourceDescriptionList - from ._models import PagedSubNameInfoList - from ._models import PagedUpdatePartitionLoadResultList - from ._models import PagedVolumeResourceDescriptionList - from ._models import PartitionAnalysisEvent - from ._models import PartitionBackupConfigurationInfo - from ._models import PartitionBackupEntity - from ._models import PartitionDataLossProgress - from ._models import PartitionEvent - from ._models import PartitionHealth - from ._models import PartitionHealthEvaluation - from ._models import PartitionHealthReportExpiredEvent - from ._models import PartitionHealthState - from ._models import PartitionHealthStateChunk - from ._models import PartitionHealthStateChunkList - from ._models import PartitionHealthStateFilter - from ._models import PartitionInformation - from ._models import PartitionInstanceCountScaleMechanism - from ._models import PartitionLoadInformation - from ._models import PartitionMetricLoadDescription - from ._models import PartitionNewHealthReportEvent - from ._models import PartitionPrimaryMoveAnalysisEvent - from ._models import PartitionQuorumLossProgress - from ._models import PartitionReconfiguredEvent - from ._models import PartitionRestartProgress - from ._models import PartitionSafetyCheck - from ._models import PartitionSchemeDescription - from ._models import PartitionsHealthEvaluation - from ._models import PrimaryReplicatorStatus - from ._models import Probe - from ._models import ProbeExec - from ._models import ProbeHttpGet - from ._models import ProbeHttpGetHeaders - from ._models import ProbeTcpSocket - from ._models import PropertyBatchDescriptionList - from ._models import PropertyBatchInfo - from ._models import PropertyBatchOperation - from ._models import 
PropertyDescription - from ._models import PropertyInfo - from ._models import PropertyMetadata - from ._models import PropertyValue - from ._models import ProvisionApplicationTypeDescription - from ._models import ProvisionApplicationTypeDescriptionBase - from ._models import ProvisionFabricDescription - from ._models import PutPropertyBatchOperation - from ._models import ReconfigurationInformation - from ._models import RegistryCredential - from ._models import ReliableCollectionsRef - from ._models import RemoteReplicatorAcknowledgementDetail - from ._models import RemoteReplicatorAcknowledgementStatus - from ._models import RemoteReplicatorStatus - from ._models import RepairImpactDescriptionBase - from ._models import RepairTargetDescriptionBase - from ._models import RepairTask - from ._models import RepairTaskApproveDescription - from ._models import RepairTaskCancelDescription - from ._models import RepairTaskDeleteDescription - from ._models import RepairTaskHistory - from ._models import RepairTaskUpdateHealthPolicyDescription - from ._models import RepairTaskUpdateInfo - from ._models import ReplicaEvent - from ._models import ReplicaHealth - from ._models import ReplicaHealthEvaluation - from ._models import ReplicaHealthState - from ._models import ReplicaHealthStateChunk - from ._models import ReplicaHealthStateChunkList - from ._models import ReplicaHealthStateFilter - from ._models import ReplicaInfo - from ._models import ReplicaMetricLoadDescription - from ._models import ReplicasHealthEvaluation - from ._models import ReplicaStatusBase - from ._models import ReplicatorQueueStatus - from ._models import ReplicatorStatus - from ._models import ResolvedServiceEndpoint - from ._models import ResolvedServicePartition - from ._models import ResourceLimits - from ._models import ResourceRequests - from ._models import ResourceRequirements - from ._models import RestartDeployedCodePackageDescription - from ._models import RestartNodeDescription - from 
._models import RestartPartitionResult - from ._models import RestorePartitionDescription - from ._models import RestoreProgressInfo - from ._models import ResumeApplicationUpgradeDescription - from ._models import ResumeClusterUpgradeDescription - from ._models import RetentionPolicyDescription - from ._models import RollingUpgradeUpdateDescription - from ._models import RunToCompletionExecutionPolicy - from ._models import SafetyCheck - from ._models import SafetyCheckWrapper - from ._models import ScalingMechanismDescription - from ._models import ScalingPolicyDescription - from ._models import ScalingTriggerDescription - from ._models import SecondaryActiveReplicatorStatus - from ._models import SecondaryIdleReplicatorStatus - from ._models import SecondaryReplicatorStatus - from ._models import SecretResourceDescription - from ._models import SecretResourceProperties - from ._models import SecretResourcePropertiesBase - from ._models import SecretValue - from ._models import SecretValueProperties - from ._models import SecretValueResourceDescription - from ._models import SeedNodeSafetyCheck - from ._models import SelectedPartition - from ._models import ServiceBackupConfigurationInfo - from ._models import ServiceBackupEntity - from ._models import ServiceCorrelationDescription - from ._models import ServiceCreatedEvent - from ._models import ServiceDeletedEvent - from ._models import ServiceDescription - from ._models import ServiceEvent - from ._models import ServiceFromTemplateDescription - from ._models import ServiceHealth - from ._models import ServiceHealthEvaluation - from ._models import ServiceHealthReportExpiredEvent - from ._models import ServiceHealthState - from ._models import ServiceHealthStateChunk - from ._models import ServiceHealthStateChunkList - from ._models import ServiceHealthStateFilter - from ._models import ServiceIdentity - from ._models import ServiceInfo - from ._models import ServiceLoadMetricDescription - from ._models import 
ServiceNameInfo - from ._models import ServiceNewHealthReportEvent - from ._models import ServicePartitionInfo - from ._models import ServicePlacementInvalidDomainPolicyDescription - from ._models import ServicePlacementNonPartiallyPlaceServicePolicyDescription - from ._models import ServicePlacementPolicyDescription - from ._models import ServicePlacementPreferPrimaryDomainPolicyDescription - from ._models import ServicePlacementRequiredDomainPolicyDescription - from ._models import ServicePlacementRequireDomainDistributionPolicyDescription - from ._models import ServiceProperties - from ._models import ServiceReplicaDescription - from ._models import ServiceReplicaProperties - from ._models import ServiceResourceDescription - from ._models import ServicesHealthEvaluation - from ._models import ServiceTypeDescription - from ._models import ServiceTypeExtensionDescription - from ._models import ServiceTypeHealthPolicy - from ._models import ServiceTypeHealthPolicyMapItem - from ._models import ServiceTypeInfo - from ._models import ServiceTypeManifest - from ._models import ServiceUpdateDescription - from ._models import ServiceUpgradeProgress - from ._models import Setting - from ._models import SingletonPartitionInformation - from ._models import SingletonPartitionSchemeDescription - from ._models import StartClusterUpgradeDescription - from ._models import StartedChaosEvent - from ._models import StatefulReplicaHealthReportExpiredEvent - from ._models import StatefulReplicaNewHealthReportEvent - from ._models import StatefulServiceDescription - from ._models import StatefulServiceInfo - from ._models import StatefulServicePartitionInfo - from ._models import StatefulServiceReplicaHealth - from ._models import StatefulServiceReplicaHealthState - from ._models import StatefulServiceReplicaInfo - from ._models import StatefulServiceTypeDescription - from ._models import StatefulServiceUpdateDescription - from ._models import StatelessReplicaHealthReportExpiredEvent 
- from ._models import StatelessReplicaNewHealthReportEvent - from ._models import StatelessServiceDescription - from ._models import StatelessServiceInfo - from ._models import StatelessServiceInstanceHealth - from ._models import StatelessServiceInstanceHealthState - from ._models import StatelessServiceInstanceInfo - from ._models import StatelessServicePartitionInfo - from ._models import StatelessServiceTypeDescription - from ._models import StatelessServiceUpdateDescription - from ._models import StoppedChaosEvent - from ._models import StringPropertyValue - from ._models import SuccessfulPropertyBatchInfo - from ._models import SystemApplicationHealthEvaluation - from ._models import TcpConfig - from ._models import TestErrorChaosEvent - from ._models import TimeBasedBackupScheduleDescription - from ._models import TimeOfDay - from ._models import TimeRange - from ._models import UniformInt64RangePartitionSchemeDescription - from ._models import UnplacedReplicaInformation - from ._models import UnprovisionApplicationTypeDescriptionInfo - from ._models import UnprovisionFabricDescription - from ._models import UpdateClusterUpgradeDescription - from ._models import UpdatePartitionLoadResult - from ._models import UpgradeDomainDeltaNodesCheckHealthEvaluation - from ._models import UpgradeDomainInfo - from ._models import UpgradeDomainNodesHealthEvaluation - from ._models import UpgradeOrchestrationServiceState - from ._models import UpgradeOrchestrationServiceStateSummary - from ._models import UploadChunkRange - from ._models import UploadSession - from ._models import UploadSessionInfo - from ._models import UsageInfo - from ._models import ValidationFailedChaosEvent - from ._models import VolumeProviderParametersAzureFile - from ._models import VolumeReference - from ._models import VolumeResourceDescription - from ._models import WaitForInbuildReplicaSafetyCheck - from ._models import WaitForPrimaryPlacementSafetyCheck - from ._models import 
WaitForPrimarySwapSafetyCheck - from ._models import WaitForReconfigurationSafetyCheck - from ._models import WaitingChaosEvent -from ._service_fabric_client_ap_is_enums import ( + from ._models import AadMetadata # type: ignore + from ._models import AadMetadataObject # type: ignore + from ._models import AddRemoveIncrementalNamedPartitionScalingMechanism # type: ignore + from ._models import AddRemoveReplicaScalingMechanism # type: ignore + from ._models import AnalysisEventMetadata # type: ignore + from ._models import ApplicationBackupConfigurationInfo # type: ignore + from ._models import ApplicationBackupEntity # type: ignore + from ._models import ApplicationCapacityDescription # type: ignore + from ._models import ApplicationContainerInstanceExitedEvent # type: ignore + from ._models import ApplicationCreatedEvent # type: ignore + from ._models import ApplicationDeletedEvent # type: ignore + from ._models import ApplicationDescription # type: ignore + from ._models import ApplicationEvent # type: ignore + from ._models import ApplicationHealth # type: ignore + from ._models import ApplicationHealthEvaluation # type: ignore + from ._models import ApplicationHealthPolicies # type: ignore + from ._models import ApplicationHealthPolicy # type: ignore + from ._models import ApplicationHealthPolicyMapItem # type: ignore + from ._models import ApplicationHealthPolicyMapObject # type: ignore + from ._models import ApplicationHealthReportExpiredEvent # type: ignore + from ._models import ApplicationHealthState # type: ignore + from ._models import ApplicationHealthStateChunk # type: ignore + from ._models import ApplicationHealthStateChunkList # type: ignore + from ._models import ApplicationHealthStateFilter # type: ignore + from ._models import ApplicationInfo # type: ignore + from ._models import ApplicationLoadInfo # type: ignore + from ._models import ApplicationLoadMetricInformation # type: ignore + from ._models import ApplicationMetricDescription # type: 
ignore + from ._models import ApplicationNameInfo # type: ignore + from ._models import ApplicationNewHealthReportEvent # type: ignore + from ._models import ApplicationParameter # type: ignore + from ._models import ApplicationProcessExitedEvent # type: ignore + from ._models import ApplicationResourceDescription # type: ignore + from ._models import ApplicationResourceUpgradeProgressInfo # type: ignore + from ._models import ApplicationScopedVolume # type: ignore + from ._models import ApplicationScopedVolumeCreationParameters # type: ignore + from ._models import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk # type: ignore + from ._models import ApplicationTypeApplicationsHealthEvaluation # type: ignore + from ._models import ApplicationTypeHealthPolicyMapItem # type: ignore + from ._models import ApplicationTypeImageStorePath # type: ignore + from ._models import ApplicationTypeInfo # type: ignore + from ._models import ApplicationTypeManifest # type: ignore + from ._models import ApplicationUpgradeCompletedEvent # type: ignore + from ._models import ApplicationUpgradeDescription # type: ignore + from ._models import ApplicationUpgradeDomainCompletedEvent # type: ignore + from ._models import ApplicationUpgradeProgressInfo # type: ignore + from ._models import ApplicationUpgradeRollbackCompletedEvent # type: ignore + from ._models import ApplicationUpgradeRollbackStartedEvent # type: ignore + from ._models import ApplicationUpgradeStartedEvent # type: ignore + from ._models import ApplicationUpgradeUpdateDescription # type: ignore + from ._models import ApplicationsHealthEvaluation # type: ignore + from ._models import AutoScalingMechanism # type: ignore + from ._models import AutoScalingMetric # type: ignore + from ._models import AutoScalingPolicy # type: ignore + from ._models import AutoScalingResourceMetric # type: ignore + from ._models import AutoScalingTrigger # type: ignore + from ._models import AverageLoadScalingTrigger # type: 
ignore + from ._models import AveragePartitionLoadScalingTrigger # type: ignore + from ._models import AverageServiceLoadScalingTrigger # type: ignore + from ._models import AzureBlobBackupStorageDescription # type: ignore + from ._models import AzureInternalMonitoringPipelineSinkDescription # type: ignore + from ._models import BackupConfigurationInfo # type: ignore + from ._models import BackupEntity # type: ignore + from ._models import BackupInfo # type: ignore + from ._models import BackupPartitionDescription # type: ignore + from ._models import BackupPolicyDescription # type: ignore + from ._models import BackupProgressInfo # type: ignore + from ._models import BackupScheduleDescription # type: ignore + from ._models import BackupStorageDescription # type: ignore + from ._models import BackupSuspensionInfo # type: ignore + from ._models import BasicRetentionPolicyDescription # type: ignore + from ._models import BinaryPropertyValue # type: ignore + from ._models import Chaos # type: ignore + from ._models import ChaosCodePackageRestartScheduledEvent # type: ignore + from ._models import ChaosContext # type: ignore + from ._models import ChaosEvent # type: ignore + from ._models import ChaosEventWrapper # type: ignore + from ._models import ChaosEventsSegment # type: ignore + from ._models import ChaosNodeRestartScheduledEvent # type: ignore + from ._models import ChaosParameters # type: ignore + from ._models import ChaosParametersDictionaryItem # type: ignore + from ._models import ChaosPartitionPrimaryMoveScheduledEvent # type: ignore + from ._models import ChaosPartitionSecondaryMoveScheduledEvent # type: ignore + from ._models import ChaosReplicaRemovalScheduledEvent # type: ignore + from ._models import ChaosReplicaRestartScheduledEvent # type: ignore + from ._models import ChaosSchedule # type: ignore + from ._models import ChaosScheduleDescription # type: ignore + from ._models import ChaosScheduleJob # type: ignore + from ._models import 
ChaosScheduleJobActiveDaysOfWeek # type: ignore + from ._models import ChaosStartedEvent # type: ignore + from ._models import ChaosStoppedEvent # type: ignore + from ._models import ChaosTargetFilter # type: ignore + from ._models import CheckExistsPropertyBatchOperation # type: ignore + from ._models import CheckSequencePropertyBatchOperation # type: ignore + from ._models import CheckValuePropertyBatchOperation # type: ignore + from ._models import ClusterConfiguration # type: ignore + from ._models import ClusterConfigurationUpgradeDescription # type: ignore + from ._models import ClusterConfigurationUpgradeStatusInfo # type: ignore + from ._models import ClusterEvent # type: ignore + from ._models import ClusterHealth # type: ignore + from ._models import ClusterHealthChunk # type: ignore + from ._models import ClusterHealthChunkQueryDescription # type: ignore + from ._models import ClusterHealthPolicies # type: ignore + from ._models import ClusterHealthPolicy # type: ignore + from ._models import ClusterHealthReportExpiredEvent # type: ignore + from ._models import ClusterLoadInfo # type: ignore + from ._models import ClusterManifest # type: ignore + from ._models import ClusterNewHealthReportEvent # type: ignore + from ._models import ClusterUpgradeCompletedEvent # type: ignore + from ._models import ClusterUpgradeDescriptionObject # type: ignore + from ._models import ClusterUpgradeDomainCompletedEvent # type: ignore + from ._models import ClusterUpgradeHealthPolicyObject # type: ignore + from ._models import ClusterUpgradeProgressObject # type: ignore + from ._models import ClusterUpgradeRollbackCompletedEvent # type: ignore + from ._models import ClusterUpgradeRollbackStartedEvent # type: ignore + from ._models import ClusterUpgradeStartedEvent # type: ignore + from ._models import ClusterVersion # type: ignore + from ._models import CodePackageEntryPoint # type: ignore + from ._models import CodePackageEntryPointStatistics # type: ignore + from ._models 
import ComposeDeploymentStatusInfo # type: ignore + from ._models import ComposeDeploymentUpgradeDescription # type: ignore + from ._models import ComposeDeploymentUpgradeProgressInfo # type: ignore + from ._models import ConfigParameterOverride # type: ignore + from ._models import ContainerApiRequestBody # type: ignore + from ._models import ContainerApiResponse # type: ignore + from ._models import ContainerApiResult # type: ignore + from ._models import ContainerCodePackageProperties # type: ignore + from ._models import ContainerEvent # type: ignore + from ._models import ContainerInstanceEvent # type: ignore + from ._models import ContainerInstanceView # type: ignore + from ._models import ContainerLabel # type: ignore + from ._models import ContainerLogs # type: ignore + from ._models import ContainerState # type: ignore + from ._models import CreateComposeDeploymentDescription # type: ignore + from ._models import CurrentUpgradeDomainProgressInfo # type: ignore + from ._models import DeactivationIntentDescription # type: ignore + from ._models import DefaultExecutionPolicy # type: ignore + from ._models import DeletePropertyBatchOperation # type: ignore + from ._models import DeltaNodesCheckHealthEvaluation # type: ignore + from ._models import DeployServicePackageToNodeDescription # type: ignore + from ._models import DeployedApplicationHealth # type: ignore + from ._models import DeployedApplicationHealthEvaluation # type: ignore + from ._models import DeployedApplicationHealthReportExpiredEvent # type: ignore + from ._models import DeployedApplicationHealthState # type: ignore + from ._models import DeployedApplicationHealthStateChunk # type: ignore + from ._models import DeployedApplicationHealthStateChunkList # type: ignore + from ._models import DeployedApplicationHealthStateFilter # type: ignore + from ._models import DeployedApplicationInfo # type: ignore + from ._models import DeployedApplicationNewHealthReportEvent # type: ignore + from ._models 
import DeployedApplicationsHealthEvaluation # type: ignore + from ._models import DeployedCodePackageInfo # type: ignore + from ._models import DeployedServicePackageHealth # type: ignore + from ._models import DeployedServicePackageHealthEvaluation # type: ignore + from ._models import DeployedServicePackageHealthReportExpiredEvent # type: ignore + from ._models import DeployedServicePackageHealthState # type: ignore + from ._models import DeployedServicePackageHealthStateChunk # type: ignore + from ._models import DeployedServicePackageHealthStateChunkList # type: ignore + from ._models import DeployedServicePackageHealthStateFilter # type: ignore + from ._models import DeployedServicePackageInfo # type: ignore + from ._models import DeployedServicePackageNewHealthReportEvent # type: ignore + from ._models import DeployedServicePackagesHealthEvaluation # type: ignore + from ._models import DeployedServiceReplicaDetailInfo # type: ignore + from ._models import DeployedServiceReplicaInfo # type: ignore + from ._models import DeployedServiceTypeInfo # type: ignore + from ._models import DeployedStatefulServiceReplicaDetailInfo # type: ignore + from ._models import DeployedStatefulServiceReplicaInfo # type: ignore + from ._models import DeployedStatelessServiceInstanceDetailInfo # type: ignore + from ._models import DeployedStatelessServiceInstanceInfo # type: ignore + from ._models import DiagnosticsDescription # type: ignore + from ._models import DiagnosticsRef # type: ignore + from ._models import DiagnosticsSinkProperties # type: ignore + from ._models import DisableBackupDescription # type: ignore + from ._models import DiskInfo # type: ignore + from ._models import DoublePropertyValue # type: ignore + from ._models import DsmsAzureBlobBackupStorageDescription # type: ignore + from ._models import EnableBackupDescription # type: ignore + from ._models import EndpointProperties # type: ignore + from ._models import EndpointRef # type: ignore + from ._models 
import EnsureAvailabilitySafetyCheck # type: ignore + from ._models import EnsurePartitionQuorumSafetyCheck # type: ignore + from ._models import EntityHealth # type: ignore + from ._models import EntityHealthState # type: ignore + from ._models import EntityHealthStateChunk # type: ignore + from ._models import EntityHealthStateChunkList # type: ignore + from ._models import EntityKindHealthStateCount # type: ignore + from ._models import EnvironmentVariable # type: ignore + from ._models import Epoch # type: ignore + from ._models import EventHealthEvaluation # type: ignore + from ._models import ExecutingFaultsChaosEvent # type: ignore + from ._models import ExecutionPolicy # type: ignore + from ._models import ExternalStoreProvisionApplicationTypeDescription # type: ignore + from ._models import FabricCodeVersionInfo # type: ignore + from ._models import FabricConfigVersionInfo # type: ignore + from ._models import FabricError # type: ignore + from ._models import FabricErrorError # type: ignore + from ._models import FabricEvent # type: ignore + from ._models import FailedPropertyBatchInfo # type: ignore + from ._models import FailedUpgradeDomainProgressObject # type: ignore + from ._models import FailureUpgradeDomainProgressInfo # type: ignore + from ._models import FileInfo # type: ignore + from ._models import FileShareBackupStorageDescription # type: ignore + from ._models import FileVersion # type: ignore + from ._models import FolderInfo # type: ignore + from ._models import FolderSizeInfo # type: ignore + from ._models import FrequencyBasedBackupScheduleDescription # type: ignore + from ._models import GatewayDestination # type: ignore + from ._models import GatewayResourceDescription # type: ignore + from ._models import GetBackupByStorageQueryDescription # type: ignore + from ._models import GetPropertyBatchOperation # type: ignore + from ._models import GuidPropertyValue # type: ignore + from ._models import HealthEvaluation # type: ignore + from 
._models import HealthEvaluationWrapper # type: ignore + from ._models import HealthEvent # type: ignore + from ._models import HealthInformation # type: ignore + from ._models import HealthStateCount # type: ignore + from ._models import HealthStatistics # type: ignore + from ._models import HttpConfig # type: ignore + from ._models import HttpHostConfig # type: ignore + from ._models import HttpRouteConfig # type: ignore + from ._models import HttpRouteMatchHeader # type: ignore + from ._models import HttpRouteMatchPath # type: ignore + from ._models import HttpRouteMatchRule # type: ignore + from ._models import IdentityDescription # type: ignore + from ._models import IdentityItemDescription # type: ignore + from ._models import ImageRegistryCredential # type: ignore + from ._models import ImageStoreContent # type: ignore + from ._models import ImageStoreCopyDescription # type: ignore + from ._models import ImageStoreInfo # type: ignore + from ._models import InlinedValueSecretResourceProperties # type: ignore + from ._models import InstanceLifecycleDescription # type: ignore + from ._models import Int64PropertyValue # type: ignore + from ._models import Int64RangePartitionInformation # type: ignore + from ._models import InvokeDataLossResult # type: ignore + from ._models import InvokeQuorumLossResult # type: ignore + from ._models import KeyValueStoreReplicaStatus # type: ignore + from ._models import LoadMetricInformation # type: ignore + from ._models import LoadMetricReport # type: ignore + from ._models import LoadMetricReportInfo # type: ignore + from ._models import LoadedPartitionInformationQueryDescription # type: ignore + from ._models import LoadedPartitionInformationResult # type: ignore + from ._models import LoadedPartitionInformationResultList # type: ignore + from ._models import LocalNetworkResourceProperties # type: ignore + from ._models import ManagedApplicationIdentity # type: ignore + from ._models import 
ManagedApplicationIdentityDescription # type: ignore + from ._models import ManagedIdentityAzureBlobBackupStorageDescription # type: ignore + from ._models import MetricLoadDescription # type: ignore + from ._models import MonitoringPolicyDescription # type: ignore + from ._models import NameDescription # type: ignore + from ._models import NamedPartitionInformation # type: ignore + from ._models import NamedPartitionSchemeDescription # type: ignore + from ._models import NetworkRef # type: ignore + from ._models import NetworkResourceDescription # type: ignore + from ._models import NetworkResourceProperties # type: ignore + from ._models import NetworkResourcePropertiesBase # type: ignore + from ._models import NodeAbortedEvent # type: ignore + from ._models import NodeAddedToClusterEvent # type: ignore + from ._models import NodeClosedEvent # type: ignore + from ._models import NodeDeactivateCompletedEvent # type: ignore + from ._models import NodeDeactivateStartedEvent # type: ignore + from ._models import NodeDeactivationInfo # type: ignore + from ._models import NodeDeactivationTask # type: ignore + from ._models import NodeDeactivationTaskId # type: ignore + from ._models import NodeDownEvent # type: ignore + from ._models import NodeEvent # type: ignore + from ._models import NodeHealth # type: ignore + from ._models import NodeHealthEvaluation # type: ignore + from ._models import NodeHealthReportExpiredEvent # type: ignore + from ._models import NodeHealthState # type: ignore + from ._models import NodeHealthStateChunk # type: ignore + from ._models import NodeHealthStateChunkList # type: ignore + from ._models import NodeHealthStateFilter # type: ignore + from ._models import NodeId # type: ignore + from ._models import NodeImpact # type: ignore + from ._models import NodeInfo # type: ignore + from ._models import NodeLoadInfo # type: ignore + from ._models import NodeLoadMetricInformation # type: ignore + from ._models import NodeNewHealthReportEvent # 
type: ignore + from ._models import NodeOpenFailedEvent # type: ignore + from ._models import NodeOpenSucceededEvent # type: ignore + from ._models import NodeRemovedFromClusterEvent # type: ignore + from ._models import NodeRepairImpactDescription # type: ignore + from ._models import NodeRepairTargetDescription # type: ignore + from ._models import NodeResult # type: ignore + from ._models import NodeTagsDescription # type: ignore + from ._models import NodeTransitionProgress # type: ignore + from ._models import NodeTransitionResult # type: ignore + from ._models import NodeTypeHealthPolicyMapItem # type: ignore + from ._models import NodeTypeNodesHealthEvaluation # type: ignore + from ._models import NodeUpEvent # type: ignore + from ._models import NodeUpgradeProgressInfo # type: ignore + from ._models import NodesHealthEvaluation # type: ignore + from ._models import OperationStatus # type: ignore + from ._models import PackageSharingPolicyInfo # type: ignore + from ._models import PagedApplicationInfoList # type: ignore + from ._models import PagedApplicationResourceDescriptionList # type: ignore + from ._models import PagedApplicationTypeInfoList # type: ignore + from ._models import PagedBackupConfigurationInfoList # type: ignore + from ._models import PagedBackupEntityList # type: ignore + from ._models import PagedBackupInfoList # type: ignore + from ._models import PagedBackupPolicyDescriptionList # type: ignore + from ._models import PagedComposeDeploymentStatusInfoList # type: ignore + from ._models import PagedDeployedApplicationInfoList # type: ignore + from ._models import PagedGatewayResourceDescriptionList # type: ignore + from ._models import PagedNetworkResourceDescriptionList # type: ignore + from ._models import PagedNodeInfoList # type: ignore + from ._models import PagedPropertyInfoList # type: ignore + from ._models import PagedReplicaInfoList # type: ignore + from ._models import PagedSecretResourceDescriptionList # type: ignore + from 
._models import PagedSecretValueResourceDescriptionList # type: ignore + from ._models import PagedServiceInfoList # type: ignore + from ._models import PagedServicePartitionInfoList # type: ignore + from ._models import PagedServiceReplicaDescriptionList # type: ignore + from ._models import PagedServiceResourceDescriptionList # type: ignore + from ._models import PagedSubNameInfoList # type: ignore + from ._models import PagedUpdatePartitionLoadResultList # type: ignore + from ._models import PagedVolumeResourceDescriptionList # type: ignore + from ._models import PartitionAnalysisEvent # type: ignore + from ._models import PartitionBackupConfigurationInfo # type: ignore + from ._models import PartitionBackupEntity # type: ignore + from ._models import PartitionDataLossProgress # type: ignore + from ._models import PartitionEvent # type: ignore + from ._models import PartitionHealth # type: ignore + from ._models import PartitionHealthEvaluation # type: ignore + from ._models import PartitionHealthReportExpiredEvent # type: ignore + from ._models import PartitionHealthState # type: ignore + from ._models import PartitionHealthStateChunk # type: ignore + from ._models import PartitionHealthStateChunkList # type: ignore + from ._models import PartitionHealthStateFilter # type: ignore + from ._models import PartitionInformation # type: ignore + from ._models import PartitionInstanceCountScaleMechanism # type: ignore + from ._models import PartitionLoadInformation # type: ignore + from ._models import PartitionMetricLoadDescription # type: ignore + from ._models import PartitionNewHealthReportEvent # type: ignore + from ._models import PartitionPrimaryMoveAnalysisEvent # type: ignore + from ._models import PartitionQuorumLossProgress # type: ignore + from ._models import PartitionReconfiguredEvent # type: ignore + from ._models import PartitionRestartProgress # type: ignore + from ._models import PartitionSafetyCheck # type: ignore + from ._models import 
PartitionSchemeDescription # type: ignore + from ._models import PartitionsHealthEvaluation # type: ignore + from ._models import PrimaryReplicatorStatus # type: ignore + from ._models import Probe # type: ignore + from ._models import ProbeExec # type: ignore + from ._models import ProbeHttpGet # type: ignore + from ._models import ProbeHttpGetHeaders # type: ignore + from ._models import ProbeTcpSocket # type: ignore + from ._models import PropertyBatchDescriptionList # type: ignore + from ._models import PropertyBatchInfo # type: ignore + from ._models import PropertyBatchOperation # type: ignore + from ._models import PropertyDescription # type: ignore + from ._models import PropertyInfo # type: ignore + from ._models import PropertyMetadata # type: ignore + from ._models import PropertyValue # type: ignore + from ._models import ProvisionApplicationTypeDescription # type: ignore + from ._models import ProvisionApplicationTypeDescriptionBase # type: ignore + from ._models import ProvisionFabricDescription # type: ignore + from ._models import PutPropertyBatchOperation # type: ignore + from ._models import ReconfigurationInformation # type: ignore + from ._models import RegistryCredential # type: ignore + from ._models import ReliableCollectionsRef # type: ignore + from ._models import RemoteReplicatorAcknowledgementDetail # type: ignore + from ._models import RemoteReplicatorAcknowledgementStatus # type: ignore + from ._models import RemoteReplicatorStatus # type: ignore + from ._models import RepairImpactDescriptionBase # type: ignore + from ._models import RepairTargetDescriptionBase # type: ignore + from ._models import RepairTask # type: ignore + from ._models import RepairTaskApproveDescription # type: ignore + from ._models import RepairTaskCancelDescription # type: ignore + from ._models import RepairTaskDeleteDescription # type: ignore + from ._models import RepairTaskHistory # type: ignore + from ._models import RepairTaskUpdateHealthPolicyDescription 
# type: ignore + from ._models import RepairTaskUpdateInfo # type: ignore + from ._models import ReplicaEvent # type: ignore + from ._models import ReplicaHealth # type: ignore + from ._models import ReplicaHealthEvaluation # type: ignore + from ._models import ReplicaHealthState # type: ignore + from ._models import ReplicaHealthStateChunk # type: ignore + from ._models import ReplicaHealthStateChunkList # type: ignore + from ._models import ReplicaHealthStateFilter # type: ignore + from ._models import ReplicaInfo # type: ignore + from ._models import ReplicaLifecycleDescription # type: ignore + from ._models import ReplicaMetricLoadDescription # type: ignore + from ._models import ReplicaStatusBase # type: ignore + from ._models import ReplicasHealthEvaluation # type: ignore + from ._models import ReplicatorQueueStatus # type: ignore + from ._models import ReplicatorStatus # type: ignore + from ._models import ResolvedServiceEndpoint # type: ignore + from ._models import ResolvedServicePartition # type: ignore + from ._models import ResourceLimits # type: ignore + from ._models import ResourceRequests # type: ignore + from ._models import ResourceRequirements # type: ignore + from ._models import RestartDeployedCodePackageDescription # type: ignore + from ._models import RestartNodeDescription # type: ignore + from ._models import RestartPartitionResult # type: ignore + from ._models import RestorePartitionDescription # type: ignore + from ._models import RestoreProgressInfo # type: ignore + from ._models import ResumeApplicationUpgradeDescription # type: ignore + from ._models import ResumeClusterUpgradeDescription # type: ignore + from ._models import RetentionPolicyDescription # type: ignore + from ._models import RollingUpgradeUpdateDescription # type: ignore + from ._models import RunToCompletionExecutionPolicy # type: ignore + from ._models import SafetyCheck # type: ignore + from ._models import SafetyCheckWrapper # type: ignore + from ._models import 
ScalingMechanismDescription # type: ignore + from ._models import ScalingPolicyDescription # type: ignore + from ._models import ScalingTriggerDescription # type: ignore + from ._models import SecondaryActiveReplicatorStatus # type: ignore + from ._models import SecondaryIdleReplicatorStatus # type: ignore + from ._models import SecondaryReplicatorStatus # type: ignore + from ._models import SecretResourceDescription # type: ignore + from ._models import SecretResourceProperties # type: ignore + from ._models import SecretResourcePropertiesBase # type: ignore + from ._models import SecretValue # type: ignore + from ._models import SecretValueProperties # type: ignore + from ._models import SecretValueResourceDescription # type: ignore + from ._models import SecretValueResourceProperties # type: ignore + from ._models import SeedNodeSafetyCheck # type: ignore + from ._models import SelectedPartition # type: ignore + from ._models import ServiceBackupConfigurationInfo # type: ignore + from ._models import ServiceBackupEntity # type: ignore + from ._models import ServiceCorrelationDescription # type: ignore + from ._models import ServiceCreatedEvent # type: ignore + from ._models import ServiceDeletedEvent # type: ignore + from ._models import ServiceDescription # type: ignore + from ._models import ServiceEvent # type: ignore + from ._models import ServiceFromTemplateDescription # type: ignore + from ._models import ServiceHealth # type: ignore + from ._models import ServiceHealthEvaluation # type: ignore + from ._models import ServiceHealthReportExpiredEvent # type: ignore + from ._models import ServiceHealthState # type: ignore + from ._models import ServiceHealthStateChunk # type: ignore + from ._models import ServiceHealthStateChunkList # type: ignore + from ._models import ServiceHealthStateFilter # type: ignore + from ._models import ServiceIdentity # type: ignore + from ._models import ServiceInfo # type: ignore + from ._models import 
ServiceLoadMetricDescription # type: ignore + from ._models import ServiceNameInfo # type: ignore + from ._models import ServiceNewHealthReportEvent # type: ignore + from ._models import ServicePartitionInfo # type: ignore + from ._models import ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription # type: ignore + from ._models import ServicePlacementInvalidDomainPolicyDescription # type: ignore + from ._models import ServicePlacementNonPartiallyPlaceServicePolicyDescription # type: ignore + from ._models import ServicePlacementPolicyDescription # type: ignore + from ._models import ServicePlacementPreferPrimaryDomainPolicyDescription # type: ignore + from ._models import ServicePlacementRequireDomainDistributionPolicyDescription # type: ignore + from ._models import ServicePlacementRequiredDomainPolicyDescription # type: ignore + from ._models import ServiceProperties # type: ignore + from ._models import ServiceReplicaDescription # type: ignore + from ._models import ServiceReplicaProperties # type: ignore + from ._models import ServiceResourceDescription # type: ignore + from ._models import ServiceResourceProperties # type: ignore + from ._models import ServiceTypeDescription # type: ignore + from ._models import ServiceTypeExtensionDescription # type: ignore + from ._models import ServiceTypeHealthPolicy # type: ignore + from ._models import ServiceTypeHealthPolicyMapItem # type: ignore + from ._models import ServiceTypeInfo # type: ignore + from ._models import ServiceTypeManifest # type: ignore + from ._models import ServiceUpdateDescription # type: ignore + from ._models import ServiceUpgradeProgress # type: ignore + from ._models import ServicesHealthEvaluation # type: ignore + from ._models import Setting # type: ignore + from ._models import SingletonPartitionInformation # type: ignore + from ._models import SingletonPartitionSchemeDescription # type: ignore + from ._models import StartClusterUpgradeDescription # type: ignore + from 
._models import StartedChaosEvent # type: ignore + from ._models import StatefulReplicaHealthReportExpiredEvent # type: ignore + from ._models import StatefulReplicaNewHealthReportEvent # type: ignore + from ._models import StatefulServiceDescription # type: ignore + from ._models import StatefulServiceInfo # type: ignore + from ._models import StatefulServicePartitionInfo # type: ignore + from ._models import StatefulServiceReplicaHealth # type: ignore + from ._models import StatefulServiceReplicaHealthState # type: ignore + from ._models import StatefulServiceReplicaInfo # type: ignore + from ._models import StatefulServiceTypeDescription # type: ignore + from ._models import StatefulServiceUpdateDescription # type: ignore + from ._models import StatelessReplicaHealthReportExpiredEvent # type: ignore + from ._models import StatelessReplicaNewHealthReportEvent # type: ignore + from ._models import StatelessServiceDescription # type: ignore + from ._models import StatelessServiceInfo # type: ignore + from ._models import StatelessServiceInstanceHealth # type: ignore + from ._models import StatelessServiceInstanceHealthState # type: ignore + from ._models import StatelessServiceInstanceInfo # type: ignore + from ._models import StatelessServicePartitionInfo # type: ignore + from ._models import StatelessServiceTypeDescription # type: ignore + from ._models import StatelessServiceUpdateDescription # type: ignore + from ._models import StoppedChaosEvent # type: ignore + from ._models import StringPropertyValue # type: ignore + from ._models import SuccessfulPropertyBatchInfo # type: ignore + from ._models import SystemApplicationHealthEvaluation # type: ignore + from ._models import TcpConfig # type: ignore + from ._models import TestErrorChaosEvent # type: ignore + from ._models import TimeBasedBackupScheduleDescription # type: ignore + from ._models import TimeOfDay # type: ignore + from ._models import TimeRange # type: ignore + from ._models import 
UniformInt64RangePartitionSchemeDescription # type: ignore + from ._models import UnplacedReplicaInformation # type: ignore + from ._models import UnprovisionApplicationTypeDescriptionInfo # type: ignore + from ._models import UnprovisionFabricDescription # type: ignore + from ._models import UpdateClusterUpgradeDescription # type: ignore + from ._models import UpdatePartitionLoadResult # type: ignore + from ._models import UpgradeDomainDeltaNodesCheckHealthEvaluation # type: ignore + from ._models import UpgradeDomainInfo # type: ignore + from ._models import UpgradeDomainNodesHealthEvaluation # type: ignore + from ._models import UpgradeOrchestrationServiceState # type: ignore + from ._models import UpgradeOrchestrationServiceStateSummary # type: ignore + from ._models import UploadChunkRange # type: ignore + from ._models import UploadSession # type: ignore + from ._models import UploadSessionInfo # type: ignore + from ._models import UsageInfo # type: ignore + from ._models import ValidationFailedChaosEvent # type: ignore + from ._models import VolumeProviderParametersAzureFile # type: ignore + from ._models import VolumeReference # type: ignore + from ._models import VolumeResourceDescription # type: ignore + from ._models import WaitForInbuildReplicaSafetyCheck # type: ignore + from ._models import WaitForPrimaryPlacementSafetyCheck # type: ignore + from ._models import WaitForPrimarySwapSafetyCheck # type: ignore + from ._models import WaitForReconfigurationSafetyCheck # type: ignore + from ._models import WaitingChaosEvent # type: ignore + +from ._service_fabric_client_apis_enums import ( ApplicationDefinitionKind, ApplicationPackageCleanupPolicy, ApplicationResourceUpgradeState, @@ -1068,9 +1090,11 @@ HealthEvaluationKind, HealthState, HostIsolationMode, + HostOptions, HostType, ImageRegistryPasswordType, ImpactLevel, + ManagedIdentityType, MoveCost, NetworkKind, NodeDeactivationIntent, @@ -1083,9 +1107,11 @@ OperatingSystemType, OperationState, 
OperationType, + Ordering, PackageSharingPolicyScope, PartitionAccessStatus, PartitionScheme, + PathMatchType, PropertyBatchInfoKind, PropertyBatchOperationKind, PropertyValueKind, @@ -1174,7 +1200,6 @@ 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', - 'ApplicationsHealthEvaluation', 'ApplicationTypeApplicationsHealthEvaluation', 'ApplicationTypeHealthPolicyMapItem', 'ApplicationTypeImageStorePath', @@ -1188,6 +1213,7 @@ 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStartedEvent', 'ApplicationUpgradeUpdateDescription', + 'ApplicationsHealthEvaluation', 'AutoScalingMechanism', 'AutoScalingMetric', 'AutoScalingPolicy', @@ -1213,8 +1239,8 @@ 'ChaosCodePackageRestartScheduledEvent', 'ChaosContext', 'ChaosEvent', - 'ChaosEventsSegment', 'ChaosEventWrapper', + 'ChaosEventsSegment', 'ChaosNodeRestartScheduledEvent', 'ChaosParameters', 'ChaosParametersDictionaryItem', @@ -1276,6 +1302,7 @@ 'DefaultExecutionPolicy', 'DeletePropertyBatchOperation', 'DeltaNodesCheckHealthEvaluation', + 'DeployServicePackageToNodeDescription', 'DeployedApplicationHealth', 'DeployedApplicationHealthEvaluation', 'DeployedApplicationHealthReportExpiredEvent', @@ -1304,7 +1331,6 @@ 'DeployedStatefulServiceReplicaInfo', 'DeployedStatelessServiceInstanceDetailInfo', 'DeployedStatelessServiceInstanceInfo', - 'DeployServicePackageToNodeDescription', 'DiagnosticsDescription', 'DiagnosticsRef', 'DiagnosticsSinkProperties', @@ -1330,7 +1356,7 @@ 'ExternalStoreProvisionApplicationTypeDescription', 'FabricCodeVersionInfo', 'FabricConfigVersionInfo', - 'FabricError', 'FabricErrorException', + 'FabricError', 'FabricErrorError', 'FabricEvent', 'FailedPropertyBatchInfo', @@ -1366,6 +1392,7 @@ 'ImageStoreCopyDescription', 'ImageStoreInfo', 'InlinedValueSecretResourceProperties', + 'InstanceLifecycleDescription', 'Int64PropertyValue', 'Int64RangePartitionInformation', 'InvokeDataLossResult', @@ -1374,9 
+1401,13 @@ 'LoadMetricInformation', 'LoadMetricReport', 'LoadMetricReportInfo', + 'LoadedPartitionInformationQueryDescription', + 'LoadedPartitionInformationResult', + 'LoadedPartitionInformationResultList', 'LocalNetworkResourceProperties', 'ManagedApplicationIdentity', 'ManagedApplicationIdentityDescription', + 'ManagedIdentityAzureBlobBackupStorageDescription', 'MetricLoadDescription', 'MonitoringPolicyDescription', 'NameDescription', @@ -1415,11 +1446,14 @@ 'NodeRepairImpactDescription', 'NodeRepairTargetDescription', 'NodeResult', - 'NodesHealthEvaluation', + 'NodeTagsDescription', 'NodeTransitionProgress', 'NodeTransitionResult', + 'NodeTypeHealthPolicyMapItem', + 'NodeTypeNodesHealthEvaluation', 'NodeUpEvent', 'NodeUpgradeProgressInfo', + 'NodesHealthEvaluation', 'OperationStatus', 'PackageSharingPolicyInfo', 'PagedApplicationInfoList', @@ -1509,9 +1543,10 @@ 'ReplicaHealthStateChunkList', 'ReplicaHealthStateFilter', 'ReplicaInfo', + 'ReplicaLifecycleDescription', 'ReplicaMetricLoadDescription', - 'ReplicasHealthEvaluation', 'ReplicaStatusBase', + 'ReplicasHealthEvaluation', 'ReplicatorQueueStatus', 'ReplicatorStatus', 'ResolvedServiceEndpoint', @@ -1543,6 +1578,7 @@ 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', + 'SecretValueResourceProperties', 'SeedNodeSafetyCheck', 'SelectedPartition', 'ServiceBackupConfigurationInfo', @@ -1566,17 +1602,18 @@ 'ServiceNameInfo', 'ServiceNewHealthReportEvent', 'ServicePartitionInfo', + 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'ServicePlacementInvalidDomainPolicyDescription', 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'ServicePlacementPolicyDescription', 'ServicePlacementPreferPrimaryDomainPolicyDescription', - 'ServicePlacementRequiredDomainPolicyDescription', 'ServicePlacementRequireDomainDistributionPolicyDescription', + 'ServicePlacementRequiredDomainPolicyDescription', 'ServiceProperties', 'ServiceReplicaDescription', 
'ServiceReplicaProperties', 'ServiceResourceDescription', - 'ServicesHealthEvaluation', + 'ServiceResourceProperties', 'ServiceTypeDescription', 'ServiceTypeExtensionDescription', 'ServiceTypeHealthPolicy', @@ -1585,6 +1622,7 @@ 'ServiceTypeManifest', 'ServiceUpdateDescription', 'ServiceUpgradeProgress', + 'ServicesHealthEvaluation', 'Setting', 'SingletonPartitionInformation', 'SingletonPartitionSchemeDescription', @@ -1643,114 +1681,118 @@ 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfigurationSafetyCheck', 'WaitingChaosEvent', - 'HealthState', - 'FabricErrorCodes', 'ApplicationDefinitionKind', - 'ApplicationStatus', 'ApplicationPackageCleanupPolicy', + 'ApplicationResourceUpgradeState', + 'ApplicationScopedVolumeKind', + 'ApplicationStatus', 'ApplicationTypeDefinitionKind', 'ApplicationTypeStatus', - 'UpgradeKind', - 'UpgradeMode', - 'UpgradeSortOrder', - 'FailureAction', - 'UpgradeDomainState', - 'UpgradeState', - 'NodeUpgradePhase', - 'FailureReason', + 'AutoScalingMechanismKind', + 'AutoScalingMetricKind', + 'AutoScalingResourceMetricName', + 'AutoScalingTriggerKind', + 'BackupEntityKind', + 'BackupPolicyScope', + 'BackupScheduleFrequencyType', + 'BackupScheduleKind', + 'BackupState', + 'BackupStorageKind', + 'BackupSuspensionScope', + 'BackupType', + 'ChaosEventKind', + 'ChaosScheduleStatus', + 'ChaosStatus', + 'ComposeDeploymentStatus', + 'ComposeDeploymentUpgradeState', + 'CreateFabricDump', + 'DataLossMode', + 'DayOfWeek', 'DeactivationIntent', 'DeployedApplicationStatus', - 'ReplicaStatus', - 'ReplicaRole', - 'ReconfigurationPhase', - 'ReconfigurationType', + 'DeploymentStatus', + 'DiagnosticsSinkKind', 'EntityKind', + 'EntryPointStatus', + 'EnvironmentVariableType', + 'ExecutionPolicyType', + 'FabricErrorCodes', 'FabricEventKind', + 'FabricReplicaStatus', + 'FailureAction', + 'FailureReason', + 'HeaderMatchType', 'HealthEvaluationKind', + 'HealthState', + 'HostIsolationMode', + 'HostOptions', + 'HostType', + 'ImageRegistryPasswordType', + 'ImpactLevel', 
+ 'ManagedIdentityType', + 'MoveCost', + 'NetworkKind', 'NodeDeactivationIntent', 'NodeDeactivationStatus', 'NodeDeactivationTaskType', 'NodeStatus', - 'ServicePartitionStatus', - 'ServiceStatus', - 'ProvisionApplicationTypeKind', - 'UpgradeType', - 'SafetyCheckKind', - 'CreateFabricDump', - 'ServicePackageActivationMode', - 'ServiceKind', - 'ServicePartitionKind', - 'ServicePlacementPolicyType', - 'ServiceLoadMetricWeight', - 'HostType', - 'HostIsolationMode', - 'DeploymentStatus', - 'EntryPointStatus', - 'ChaosStatus', - 'ChaosScheduleStatus', - 'ChaosEventKind', - 'ComposeDeploymentStatus', - 'ComposeDeploymentUpgradeState', - 'ServiceCorrelationScheme', - 'MoveCost', - 'PartitionScheme', - 'ServiceOperationName', - 'ReplicatorOperationName', - 'PartitionAccessStatus', - 'FabricReplicaStatus', - 'ReplicaKind', - 'ServiceTypeRegistrationStatus', - 'ServiceEndpointRole', + 'NodeStatusFilter', + 'NodeTransitionType', + 'NodeUpgradePhase', + 'OperatingSystemType', 'OperationState', 'OperationType', + 'Ordering', 'PackageSharingPolicyScope', - 'PropertyValueKind', - 'PropertyBatchOperationKind', + 'PartitionAccessStatus', + 'PartitionScheme', + 'PathMatchType', 'PropertyBatchInfoKind', - 'RetentionPolicyType', - 'BackupStorageKind', - 'BackupScheduleKind', - 'BackupPolicyScope', - 'BackupSuspensionScope', - 'RestoreState', - 'BackupType', - 'BackupScheduleFrequencyType', - 'DayOfWeek', - 'BackupState', - 'BackupEntityKind', - 'ImpactLevel', + 'PropertyBatchOperationKind', + 'PropertyValueKind', + 'ProvisionApplicationTypeKind', + 'QuorumLossMode', + 'ReconfigurationPhase', + 'ReconfigurationType', 'RepairImpactKind', 'RepairTargetKind', - 'State', - 'ResultStatus', 'RepairTaskHealthCheckState', - 'ScalingTriggerKind', - 'ScalingMechanismKind', + 'ReplicaHealthReportServiceKind', + 'ReplicaKind', + 'ReplicaRole', + 'ReplicaStatus', + 'ReplicatorOperationName', 'ResourceStatus', + 'RestartPartitionMode', + 'RestartPolicy', + 'RestoreState', + 'ResultStatus', + 
'RetentionPolicyType', + 'RollingUpgradeMode', + 'SafetyCheckKind', + 'ScalingMechanismKind', + 'ScalingTriggerKind', + 'Scheme', 'SecretKind', - 'VolumeProvider', - 'SizeTypes', - 'ApplicationScopedVolumeKind', - 'NetworkKind', - 'HeaderMatchType', - 'OperatingSystemType', - 'ImageRegistryPasswordType', - 'EnvironmentVariableType', + 'ServiceCorrelationScheme', + 'ServiceEndpointRole', + 'ServiceKind', + 'ServiceLoadMetricWeight', + 'ServiceOperationName', + 'ServicePackageActivationMode', + 'ServicePartitionKind', + 'ServicePartitionStatus', + 'ServicePlacementPolicyType', + 'ServiceStatus', + 'ServiceTypeRegistrationStatus', 'SettingType', - 'Scheme', - 'ApplicationResourceUpgradeState', - 'RollingUpgradeMode', - 'DiagnosticsSinkKind', - 'AutoScalingMechanismKind', - 'AutoScalingMetricKind', - 'AutoScalingResourceMetricName', - 'AutoScalingTriggerKind', - 'ExecutionPolicyType', - 'RestartPolicy', - 'NodeStatusFilter', - 'ReplicaHealthReportServiceKind', - 'DataLossMode', - 'NodeTransitionType', - 'QuorumLossMode', - 'RestartPartitionMode', + 'SizeTypes', + 'State', + 'UpgradeDomainState', + 'UpgradeKind', + 'UpgradeMode', + 'UpgradeSortOrder', + 'UpgradeState', + 'UpgradeType', + 'VolumeProvider', ] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py index 58d77b9b41ce..a5b67aec98b9 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py @@ -1,19 +1,16 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. 
# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from msrest.serialization import Model -from msrest.exceptions import HttpOperationError +from azure.core.exceptions import HttpResponseError +import msrest.serialization -class AadMetadata(Model): +class AadMetadata(msrest.serialization.Model): """Azure Active Directory metadata used for secured connection to cluster. :param authority: The AAD authority url. @@ -39,7 +36,10 @@ class AadMetadata(Model): 'tenant': {'key': 'tenant', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AadMetadata, self).__init__(**kwargs) self.authority = kwargs.get('authority', None) self.client = kwargs.get('client', None) @@ -49,14 +49,12 @@ def __init__(self, **kwargs): self.tenant = kwargs.get('tenant', None) -class AadMetadataObject(Model): - """Azure Active Directory metadata object used for secured connection to - cluster. +class AadMetadataObject(msrest.serialization.Model): + """Azure Active Directory metadata object used for secured connection to cluster. :param type: The client authentication method. :type type: str - :param metadata: Azure Active Directory metadata used for secured - connection to cluster. + :param metadata: Azure Active Directory metadata used for secured connection to cluster. 
:type metadata: ~azure.servicefabric.models.AadMetadata """ @@ -65,23 +63,27 @@ class AadMetadataObject(Model): 'metadata': {'key': 'metadata', 'type': 'AadMetadata'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AadMetadataObject, self).__init__(**kwargs) self.type = kwargs.get('type', None) self.metadata = kwargs.get('metadata', None) -class ScalingMechanismDescription(Model): +class ScalingMechanismDescription(msrest.serialization.Model): """Describes the mechanism for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionInstanceCountScaleMechanism, - AddRemoveIncrementalNamedPartitionScalingMechanism + sub-classes are: AddRemoveIncrementalNamedPartitionScalingMechanism, PartitionInstanceCountScaleMechanism. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. + Possible values include: "Invalid", "PartitionInstanceCount", + "AddRemoveIncrementalNamedPartition". 
+ :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind """ _validation = { @@ -93,30 +95,32 @@ class ScalingMechanismDescription(Model): } _subtype_map = { - 'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'} + 'kind': {'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism', 'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ScalingMechanismDescription, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing named partitions of a - stateless service. Partition names are in the format '0','1''N-1'. + """Represents a scaling mechanism for adding or removing named partitions of a stateless service. Partition names are in the format '0','1''N-1'. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param min_partition_count: Required. Minimum number of named partitions - of the service. + :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. + Possible values include: "Invalid", "PartitionInstanceCount", + "AddRemoveIncrementalNamedPartition". + :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind + :param min_partition_count: Required. Minimum number of named partitions of the service. :type min_partition_count: int - :param max_partition_count: Required. Maximum number of named partitions - of the service. + :param max_partition_count: Required. Maximum number of named partitions of the service. :type max_partition_count: int - :param scale_increment: Required. 
The number of instances to add or remove - during a scaling operation. + :param scale_increment: Required. The number of instances to add or remove during a scaling + operation. :type scale_increment: int """ @@ -134,25 +138,28 @@ class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescrip 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AddRemoveIncrementalNamedPartitionScalingMechanism, self).__init__(**kwargs) - self.min_partition_count = kwargs.get('min_partition_count', None) - self.max_partition_count = kwargs.get('max_partition_count', None) - self.scale_increment = kwargs.get('scale_increment', None) - self.kind = 'AddRemoveIncrementalNamedPartition' + self.kind = 'AddRemoveIncrementalNamedPartition' # type: str + self.min_partition_count = kwargs['min_partition_count'] + self.max_partition_count = kwargs['max_partition_count'] + self.scale_increment = kwargs['scale_increment'] -class AutoScalingMechanism(Model): - """Describes the mechanism for performing auto scaling operation. Derived - classes will describe the actual mechanism. +class AutoScalingMechanism(msrest.serialization.Model): + """Describes the mechanism for performing auto scaling operation. Derived classes will describe the actual mechanism. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AddRemoveReplicaScalingMechanism + sub-classes are: AddRemoveReplicaScalingMechanism. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible + values include: "AddRemoveReplica". 
+ :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind """ _validation = { @@ -167,27 +174,30 @@ class AutoScalingMechanism(Model): 'kind': {'AddRemoveReplica': 'AddRemoveReplicaScalingMechanism'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AutoScalingMechanism, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): - """Describes the horizontal auto scaling mechanism that adds or removes - replicas (containers or container groups). + """Describes the horizontal auto scaling mechanism that adds or removes replicas (containers or container groups). All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param min_count: Required. Minimum number of containers (scale down won't - be performed below this number). + :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible + values include: "AddRemoveReplica". + :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind + :param min_count: Required. Minimum number of containers (scale down won't be performed below + this number). :type min_count: int - :param max_count: Required. Maximum number of containers (scale up won't - be performed above this number). + :param max_count: Required. Maximum number of containers (scale up won't be performed above + this number). :type max_count: int - :param scale_increment: Required. Each time auto scaling is performed, - this number of containers will be added or removed. + :param scale_increment: Required. Each time auto scaling is performed, this number of + containers will be added or removed. 
:type scale_increment: int """ @@ -205,21 +215,24 @@ class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): 'scale_increment': {'key': 'scaleIncrement', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AddRemoveReplicaScalingMechanism, self).__init__(**kwargs) - self.min_count = kwargs.get('min_count', None) - self.max_count = kwargs.get('max_count', None) - self.scale_increment = kwargs.get('scale_increment', None) - self.kind = 'AddRemoveReplica' + self.kind = 'AddRemoveReplica' # type: str + self.min_count = kwargs['min_count'] + self.max_count = kwargs['max_count'] + self.scale_increment = kwargs['scale_increment'] -class AnalysisEventMetadata(Model): +class AnalysisEventMetadata(msrest.serialization.Model): """Metadata about an Analysis Event. :param delay: The analysis delay. - :type delay: timedelta + :type delay: ~datetime.timedelta :param duration: The duration of analysis. - :type duration: timedelta + :type duration: ~datetime.timedelta """ _attribute_map = { @@ -227,33 +240,35 @@ class AnalysisEventMetadata(Model): 'duration': {'key': 'Duration', 'type': 'duration'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AnalysisEventMetadata, self).__init__(**kwargs) self.delay = kwargs.get('delay', None) self.duration = kwargs.get('duration', None) -class BackupConfigurationInfo(Model): +class BackupConfigurationInfo(msrest.serialization.Model): """Describes the backup configuration information. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupConfigurationInfo, - ServiceBackupConfigurationInfo, PartitionBackupConfigurationInfo + sub-classes are: ApplicationBackupConfigurationInfo, PartitionBackupConfigurationInfo, ServiceBackupConfigurationInfo. All required parameters must be populated in order to send to Azure. 
- :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -261,45 +276,45 @@ class BackupConfigurationInfo(Model): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo'} + 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupConfigurationInfo, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.policy_name = kwargs.get('policy_name', None) self.policy_inherited_from = kwargs.get('policy_inherited_from', None) self.suspension_info = kwargs.get('suspension_info', None) - self.kind = None class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric application - specifying what backup policy is being applied and suspend description, if - any. + """Backup configuration information for a specific Service Fabric application specifying what backup policy is being applied and suspend description, if any. All required parameters must be populated in order to send to Azure. - :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". 
+ :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. - :type kind: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. 
:type application_name: str """ @@ -308,30 +323,34 @@ class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationBackupConfigurationInfo, self).__init__(**kwargs) + self.kind = 'Application' # type: str self.application_name = kwargs.get('application_name', None) - self.kind = 'Application' -class BackupEntity(Model): +class BackupEntity(msrest.serialization.Model): """Describes the Service Fabric entity that is configured for backup. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupEntity, ServiceBackupEntity, - PartitionBackupEntity + sub-classes are: ApplicationBackupEntity, PartitionBackupEntity, ServiceBackupEntity. All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". 
+ :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind """ _validation = { @@ -343,12 +362,15 @@ class BackupEntity(Model): } _subtype_map = { - 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'} + 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Partition': 'PartitionBackupEntity', 'Service': 'ServiceBackupEntity'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupEntity, self).__init__(**kwargs) - self.entity_kind = None + self.entity_kind = None # type: Optional[str] class ApplicationBackupEntity(BackupEntity): @@ -356,10 +378,11 @@ class ApplicationBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str """ @@ -372,38 +395,35 @@ class ApplicationBackupEntity(BackupEntity): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationBackupEntity, self).__init__(**kwargs) + self.entity_kind = 'Application' # type: str self.application_name = kwargs.get('application_name', None) - self.entity_kind = 'Application' - - -class ApplicationCapacityDescription(Model): - """Describes capacity information for services of this application. 
This - description can be used for describing the following. - - Reserving the capacity for the services on the nodes - - Limiting the total number of nodes that services of this application can - run on - - Limiting the custom capacity metrics to limit the total consumption of - this metric by the services of this application. - - :param minimum_nodes: The minimum number of nodes where Service Fabric - will reserve capacity for this application. Note that this does not mean - that the services of this application will be placed on all of those - nodes. If this property is set to zero, no capacity will be reserved. The - value of this property cannot be more than the value of the MaximumNodes - property. + + +class ApplicationCapacityDescription(msrest.serialization.Model): + """Describes capacity information for services of this application. This description can be used for describing the following. + + +* Reserving the capacity for the services on the nodes +* Limiting the total number of nodes that services of this application can run on +* Limiting the custom capacity metrics to limit the total consumption of this metric by the services of this application. + + :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity + for this application. Note that this does not mean that the services of this application will + be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. + The value of this property cannot be more than the value of the MaximumNodes property. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where Service Fabric - will reserve capacity for this application. Note that this does not mean - that the services of this application will be placed on all of those - nodes. By default, the value of this property is zero and it means that - the services can be placed on any node. Default value: 0 . 
+ :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity + for this application. Note that this does not mean that the services of this application will + be placed on all of those nodes. By default, the value of this property is zero and it means + that the services can be placed on any node. :type maximum_nodes: long - :param application_metrics: List of application capacity metric - description. - :type application_metrics: - list[~azure.servicefabric.models.ApplicationMetricDescription] + :param application_metrics: List of application capacity metric description. + :type application_metrics: list[~azure.servicefabric.models.ApplicationMetricDescription] """ _validation = { @@ -417,127 +437,164 @@ class ApplicationCapacityDescription(Model): 'application_metrics': {'key': 'ApplicationMetrics', 'type': '[ApplicationMetricDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationCapacityDescription, self).__init__(**kwargs) self.minimum_nodes = kwargs.get('minimum_nodes', None) self.maximum_nodes = kwargs.get('maximum_nodes', 0) self.application_metrics = kwargs.get('application_metrics', None) -class FabricEvent(Model): +class FabricEvent(msrest.serialization.Model): """Represents the base for all Fabric Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, - NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ApplicationEvent': 'ApplicationEvent', 'ClusterEvent': 'ClusterEvent', 'ContainerInstanceEvent': 'ContainerInstanceEvent', 'NodeEvent': 'NodeEvent', 'PartitionEvent': 'PartitionEvent', 'ReplicaEvent': 'ReplicaEvent', 'ServiceEvent': 'ServiceEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FabricEvent, self).__init__(**kwargs) - self.event_instance_id = kwargs.get('event_instance_id', None) + self.kind = None # type: Optional[str] + self.event_instance_id = kwargs['event_instance_id'] self.category = kwargs.get('category', None) - self.time_stamp = kwargs.get('time_stamp', None) + self.time_stamp = kwargs['time_stamp'] self.has_correlated_events = kwargs.get('has_correlated_events', None) - self.kind = None class ApplicationEvent(FabricEvent): """Represents the base for all Application Events. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ApplicationCreatedEvent, ApplicationDeletedEvent, - ApplicationNewHealthReportEvent, ApplicationHealthReportExpiredEvent, - ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, - ApplicationUpgradeRollbackCompletedEvent, - ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, - DeployedApplicationNewHealthReportEvent, - DeployedApplicationHealthReportExpiredEvent, ApplicationProcessExitedEvent, - ApplicationContainerInstanceExitedEvent, - DeployedServicePackageNewHealthReportEvent, - DeployedServicePackageHealthReportExpiredEvent, - ChaosCodePackageRestartScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ApplicationContainerInstanceExitedEvent, ApplicationCreatedEvent, ApplicationDeletedEvent, ApplicationHealthReportExpiredEvent, ApplicationNewHealthReportEvent, ApplicationProcessExitedEvent, ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, ApplicationUpgradeRollbackCompletedEvent, ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, ChaosCodePackageRestartScheduledEvent, DeployedApplicationHealthReportExpiredEvent, DeployedApplicationNewHealthReportEvent, DeployedServicePackageHealthReportExpiredEvent, DeployedServicePackageNewHealthReportEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
:type application_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent'} + 'kind': {'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 
'ApplicationDeletedEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationEvent, self).__init__(**kwargs) - self.application_id = kwargs.get('application_id', None) - self.kind = 'ApplicationEvent' + self.kind = 'ApplicationEvent' # type: str + self.application_id = kwargs['application_id'] class ApplicationContainerInstanceExitedEvent(ApplicationEvent): @@ -545,32 +602,50 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service - package. + :param service_package_activation_id: Required. Activation Id of Service package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. :type is_exclusive: bool @@ -586,17 +661,16 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. 
:type exit_code: long - :param unexpected_termination: Required. Indicates if termination is - unexpected. + :param unexpected_termination: Required. Indicates if termination is unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: datetime + :type start_time: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -613,11 +687,11 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -633,21 +707,24 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationContainerInstanceExitedEvent, self).__init__(**kwargs) - self.service_name = kwargs.get('service_name', None) - self.service_package_name = kwargs.get('service_package_name', None) - self.service_package_activation_id = kwargs.get('service_package_activation_id', None) - self.is_exclusive = kwargs.get('is_exclusive', None) - self.code_package_name = kwargs.get('code_package_name', None) - self.entry_point_type = kwargs.get('entry_point_type', None) - self.image_name = kwargs.get('image_name', None) - self.container_name = 
kwargs.get('container_name', None) - self.host_id = kwargs.get('host_id', None) - self.exit_code = kwargs.get('exit_code', None) - self.unexpected_termination = kwargs.get('unexpected_termination', None) - self.start_time = kwargs.get('start_time', None) - self.kind = 'ApplicationContainerInstanceExited' + self.kind = 'ApplicationContainerInstanceExited' # type: str + self.service_name = kwargs['service_name'] + self.service_package_name = kwargs['service_package_name'] + self.service_package_activation_id = kwargs['service_package_activation_id'] + self.is_exclusive = kwargs['is_exclusive'] + self.code_package_name = kwargs['code_package_name'] + self.entry_point_type = kwargs['entry_point_type'] + self.image_name = kwargs['image_name'] + self.container_name = kwargs['container_name'] + self.host_id = kwargs['host_id'] + self.exit_code = kwargs['exit_code'] + self.unexpected_termination = kwargs['unexpected_termination'] + self.start_time = kwargs['start_time'] class ApplicationCreatedEvent(ApplicationEvent): @@ -655,25 +732,44 @@ class ApplicationCreatedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -684,9 +780,9 @@ class ApplicationCreatedEvent(ApplicationEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -694,23 +790,26 @@ class ApplicationCreatedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationCreatedEvent, self).__init__(**kwargs) - self.application_type_name = kwargs.get('application_type_name', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.application_definition_kind = kwargs.get('application_definition_kind', None) - self.kind = 'ApplicationCreated' + self.kind = 'ApplicationCreated' # type: str + self.application_type_name = kwargs['application_type_name'] + self.application_type_version = kwargs['application_type_version'] + self.application_definition_kind = kwargs['application_definition_kind'] class ApplicationDeletedEvent(ApplicationEvent): @@ -718,25 +817,44 @@ class ApplicationDeletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. 
The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
+ :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -745,62 +863,60 @@ class ApplicationDeletedEvent(ApplicationEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationDeletedEvent, self).__init__(**kwargs) - self.application_type_name = kwargs.get('application_type_name', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.kind = 'ApplicationDeleted' + self.kind = 'ApplicationDeleted' # type: str + self.application_type_name = kwargs['application_type_name'] + self.application_type_version = kwargs['application_type_version'] -class ApplicationDescription(Model): +class ApplicationDescription(msrest.serialization.Model): """Describes a Service Fabric application. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the - 'fabric:' URI scheme. + :param name: Required. The name of the application, including the 'fabric:' URI scheme. :type name: str - :param type_name: Required. The application type name as defined in the - application manifest. + :param type_name: Required. 
The application type name as defined in the application manifest. :type type_name: str - :param type_version: Required. The version of the application type as - defined in the application manifest. + :param type_version: Required. The version of the application type as defined in the + application manifest. :type type_version: str - :param parameter_list: List of application parameters with overridden - values from their default values specified in the application manifest. - :type parameter_list: - list[~azure.servicefabric.models.ApplicationParameter] - :param application_capacity: Describes capacity information for services - of this application. This description can be used for describing the - following. - - Reserving the capacity for the services on the nodes - - Limiting the total number of nodes that services of this application can - run on - - Limiting the custom capacity metrics to limit the total consumption of - this metric by the services of this application - :type application_capacity: - ~azure.servicefabric.models.ApplicationCapacityDescription - :param managed_application_identity: Managed application identity - description. + :param parameter_list: List of application parameters with overridden values from their default + values specified in the application manifest. + :type parameter_list: list[~azure.servicefabric.models.ApplicationParameter] + :param application_capacity: Describes capacity information for services of this application. + This description can be used for describing the following. + + + * Reserving the capacity for the services on the nodes + * Limiting the total number of nodes that services of this application can run on + * Limiting the custom capacity metrics to limit the total consumption of this metric by the + services of this application. + :type application_capacity: ~azure.servicefabric.models.ApplicationCapacityDescription + :param managed_application_identity: Managed application identity description. 
:type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -820,36 +936,36 @@ class ApplicationDescription(Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type_name = kwargs.get('type_name', None) - self.type_version = kwargs.get('type_version', None) + self.name = kwargs['name'] + self.type_name = kwargs['type_name'] + self.type_version = kwargs['type_version'] self.parameter_list = kwargs.get('parameter_list', None) self.application_capacity = kwargs.get('application_capacity', None) self.managed_application_identity = kwargs.get('managed_application_identity', None) -class EntityHealth(Model): - """Health information common to all entities in the cluster. It contains the - aggregated health state, health events and unhealthy evaluation. +class EntityHealth(msrest.serialization.Model): + """Health information common to all entities in the cluster. It contains the aggregated health state, health events and unhealthy evaluation. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). 
+ The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics """ @@ -860,7 +976,10 @@ class EntityHealth(Model): 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EntityHealth, self).__init__(**kwargs) self.aggregated_health_state = kwargs.get('aggregated_health_state', None) self.health_events = kwargs.get('health_events', None) @@ -869,36 +988,29 @@ def __init__(self, **kwargs): class ApplicationHealth(EntityHealth): - """Represents the health of the application. Contains the application - aggregated health state and the service and deployed application health - states. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). 
- The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Represents the health of the application. Contains the application aggregated health state and the service and deployed application health states. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str - :param service_health_states: Service health states as found in the health - store. 
- :type service_health_states: - list[~azure.servicefabric.models.ServiceHealthState] - :param deployed_application_health_states: Deployed application health - states as found in the health store. + :param service_health_states: Service health states as found in the health store. + :type service_health_states: list[~azure.servicefabric.models.ServiceHealthState] + :param deployed_application_health_states: Deployed application health states as found in the + health store. :type deployed_application_health_states: list[~azure.servicefabric.models.DeployedApplicationHealthState] """ @@ -913,44 +1025,41 @@ class ApplicationHealth(EntityHealth): 'deployed_application_health_states': {'key': 'DeployedApplicationHealthStates', 'type': '[DeployedApplicationHealthState]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.service_health_states = kwargs.get('service_health_states', None) self.deployed_application_health_states = kwargs.get('deployed_application_health_states', None) -class HealthEvaluation(Model): - """Represents a health evaluation which describes the data and the algorithm - used by health manager to evaluate the health of an entity. +class HealthEvaluation(msrest.serialization.Model): + """Represents a health evaluation which describes the data and the algorithm used by health manager to evaluate the health of an entity. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ApplicationHealthEvaluation, ApplicationsHealthEvaluation, - ApplicationTypeApplicationsHealthEvaluation, - DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, - DeployedApplicationsHealthEvaluation, - DeployedServicePackageHealthEvaluation, - DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, - NodeHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, - PartitionsHealthEvaluation, ReplicaHealthEvaluation, - ReplicasHealthEvaluation, ServiceHealthEvaluation, - ServicesHealthEvaluation, SystemApplicationHealthEvaluation, - UpgradeDomainDeltaNodesCheckHealthEvaluation, - UpgradeDomainNodesHealthEvaluation - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + sub-classes are: ApplicationHealthEvaluation, ApplicationTypeApplicationsHealthEvaluation, ApplicationsHealthEvaluation, DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, DeployedApplicationsHealthEvaluation, DeployedServicePackageHealthEvaluation, DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, NodeHealthEvaluation, NodeTypeNodesHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, PartitionsHealthEvaluation, ReplicaHealthEvaluation, ReplicasHealthEvaluation, ServiceHealthEvaluation, ServicesHealthEvaluation, SystemApplicationHealthEvaluation, UpgradeDomainDeltaNodesCheckHealthEvaluation, UpgradeDomainNodesHealthEvaluation. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -958,49 +1067,53 @@ class HealthEvaluation(Model): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} + 'kind': {'Application': 'ApplicationHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'NodeTypeNodes': 
'NodeTypeNodesHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HealthEvaluation, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.aggregated_health_state = kwargs.get('aggregated_health_state', None) self.description = kwargs.get('description', None) - self.kind = None class ApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for an application, containing information - about the data and the algorithm used by the health store to evaluate - health. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for an application, containing information about the data and the algorithm used by the health store to evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. 
Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the application. The types of the - unhealthy evaluations can be DeployedApplicationsHealthEvaluation, - ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the application. The types of the unhealthy evaluations can be + DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -1008,27 +1121,28 @@ class ApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Application' # type: str self.application_name = kwargs.get('application_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Application' -class ApplicationHealthPolicies(Model): - """Defines the application health policy map used to evaluate the health of an - application or one of its children entities. +class ApplicationHealthPolicies(msrest.serialization.Model): + """Defines the application health policy map used to evaluate the health of an application or one of its children entities. - :param application_health_policy_map: The wrapper that contains the map - with application health policies used to evaluate specific applications in - the cluster. + :param application_health_policy_map: The wrapper that contains the map with application health + policies used to evaluate specific applications in the cluster. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] """ @@ -1037,36 +1151,34 @@ class ApplicationHealthPolicies(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) -class ApplicationHealthPolicy(Model): - """Defines a health policy used to evaluate the health of an application or - one of its children entities. +class ApplicationHealthPolicy(msrest.serialization.Model): + """Defines a health policy used to evaluate the health of an application or one of its children entities. - :param consider_warning_as_error: Indicates whether warnings are treated - with the same severity as errors. Default value: False . + :param consider_warning_as_error: Indicates whether warnings are treated with the same severity + as errors. :type consider_warning_as_error: bool - :param max_percent_unhealthy_deployed_applications: The maximum allowed - percentage of unhealthy deployed applications. Allowed values are Byte - values from zero to 100. - The percentage represents the maximum tolerated percentage of deployed - applications that can be unhealthy before the application is considered in - error. - This is calculated by dividing the number of unhealthy deployed - applications over the number of nodes where the application is currently - deployed on in the cluster. - The computation rounds up to tolerate one failure on small numbers of - nodes. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_deployed_applications: The maximum allowed percentage of unhealthy + deployed applications. Allowed values are Byte values from zero to 100. 
+ The percentage represents the maximum tolerated percentage of deployed applications that can + be unhealthy before the application is considered in error. + This is calculated by dividing the number of unhealthy deployed applications over the number + of nodes where the application is currently deployed on in the cluster. + The computation rounds up to tolerate one failure on small numbers of nodes. Default + percentage is zero. :type max_percent_unhealthy_deployed_applications: int - :param default_service_type_health_policy: The health policy used by - default to evaluate the health of a service type. - :type default_service_type_health_policy: - ~azure.servicefabric.models.ServiceTypeHealthPolicy - :param service_type_health_policy_map: The map with service type health - policy per service type name. The map is empty by default. + :param default_service_type_health_policy: The health policy used by default to evaluate the + health of a service type. + :type default_service_type_health_policy: ~azure.servicefabric.models.ServiceTypeHealthPolicy + :param service_type_health_policy_map: The map with service type health policy per service type + name. The map is empty by default. 
:type service_type_health_policy_map: list[~azure.servicefabric.models.ServiceTypeHealthPolicyMapItem] """ @@ -1078,7 +1190,10 @@ class ApplicationHealthPolicy(Model): 'service_type_health_policy_map': {'key': 'ServiceTypeHealthPolicyMap', 'type': '[ServiceTypeHealthPolicyMapItem]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False) self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', 0) @@ -1086,16 +1201,16 @@ def __init__(self, **kwargs): self.service_type_health_policy_map = kwargs.get('service_type_health_policy_map', None) -class ApplicationHealthPolicyMapItem(Model): +class ApplicationHealthPolicyMapItem(msrest.serialization.Model): """Defines an item in ApplicationHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application health policy map item. - This is the name of the application. + :param key: Required. The key of the application health policy map item. This is the name of + the application. :type key: str - :param value: Required. The value of the application health policy map - item. This is the ApplicationHealthPolicy for this application. + :param value: Required. The value of the application health policy map item. This is the + ApplicationHealthPolicy for this application. 
:type value: ~azure.servicefabric.models.ApplicationHealthPolicy """ @@ -1109,24 +1224,25 @@ class ApplicationHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) + self.key = kwargs['key'] + self.value = kwargs['value'] -class ApplicationHealthPolicyMapObject(Model): - """Represents the map of application health policies for a ServiceFabric - cluster upgrade. +class ApplicationHealthPolicyMapObject(msrest.serialization.Model): + """Represents the map of application health policies for a ServiceFabric cluster upgrade. - :param application_health_policy_map: Defines a map that contains specific - application health policies for different applications. - Each entry specifies as key the application name and as value an - ApplicationHealthPolicy used to evaluate the application health. - If an application is not specified in the map, the application health - evaluation uses the ApplicationHealthPolicy found in its application - manifest or the default application health policy (if no health policy is - defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific application health + policies for different applications. + Each entry specifies as key the application name and as value an ApplicationHealthPolicy used + to evaluate the application health. + If an application is not specified in the map, the application health evaluation uses the + ApplicationHealthPolicy found in its application manifest or the default application health + policy (if no health policy is defined in the manifest). The map is empty by default. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] @@ -1136,7 +1252,10 @@ class ApplicationHealthPolicyMapObject(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthPolicyMapObject, self).__init__(**kwargs) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) @@ -1146,25 +1265,44 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + 
"StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1180,17 +1318,16 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1204,11 +1341,11 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1221,53 +1358,52 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthReportExpiredEvent, 
self).__init__(**kwargs) - self.application_instance_id = kwargs.get('application_instance_id', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'ApplicationHealthReportExpired' + self.kind = 'ApplicationHealthReportExpired' # type: str + self.application_instance_id = kwargs['application_instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] -class EntityHealthState(Model): - """A base type for the health state of various entities in the cluster. It - contains the aggregated health state. +class EntityHealthState(msrest.serialization.Model): + """A base type for the health state of various entities in the cluster. It contains the aggregated health state. - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState """ _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EntityHealthState, self).__init__(**kwargs) self.aggregated_health_state = kwargs.get('aggregated_health_state', None) class ApplicationHealthState(EntityHealthState): - """Represents the health state of an application, which contains the - application identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param name: The name of the application, including the 'fabric:' URI - scheme. + """Represents the health state of an application, which contains the application identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str """ @@ -1276,18 +1412,20 @@ class ApplicationHealthState(EntityHealthState): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthState, self).__init__(**kwargs) self.name = kwargs.get('name', None) -class EntityHealthStateChunk(Model): - """A base type for the health state chunk of various entities in the cluster. - It contains the aggregated health state. 
+class EntityHealthStateChunk(msrest.serialization.Model): + """A base type for the health state chunk of various entities in the cluster. It contains the aggregated health state. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -1295,35 +1433,31 @@ class EntityHealthStateChunk(Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EntityHealthStateChunk, self).__init__(**kwargs) self.health_state = kwargs.get('health_state', None) class ApplicationHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a application. - The application health state chunk contains the application name, its - aggregated health state and any children services and deployed applications - that respect the filters in cluster health chunk query description. +The application health state chunk contains the application name, its aggregated health state and any children services and deployed applications that respect the filters in cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
:type health_state: str or ~azure.servicefabric.models.HealthState - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param application_type_name: The application type name as defined in the - application manifest. + :param application_type_name: The application type name as defined in the application manifest. :type application_type_name: str - :param service_health_state_chunks: The list of service health state - chunks in the cluster that respect the filters in the cluster health chunk - query description. - :type service_health_state_chunks: - ~azure.servicefabric.models.ServiceHealthStateChunkList - :param deployed_application_health_state_chunks: The list of deployed - application health state chunks in the cluster that respect the filters in - the cluster health chunk query description. + :param service_health_state_chunks: The list of service health state chunks in the cluster that + respect the filters in the cluster health chunk query description. + :type service_health_state_chunks: ~azure.servicefabric.models.ServiceHealthStateChunkList + :param deployed_application_health_state_chunks: The list of deployed application health state + chunks in the cluster that respect the filters in the cluster health chunk query description. 
:type deployed_application_health_state_chunks: ~azure.servicefabric.models.DeployedApplicationHealthStateChunkList """ @@ -1336,7 +1470,10 @@ class ApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_application_health_state_chunks': {'key': 'DeployedApplicationHealthStateChunks', 'type': 'DeployedApplicationHealthStateChunkList'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthStateChunk, self).__init__(**kwargs) self.application_name = kwargs.get('application_name', None) self.application_type_name = kwargs.get('application_type_name', None) @@ -1344,12 +1481,11 @@ def __init__(self, **kwargs): self.deployed_application_health_state_chunks = kwargs.get('deployed_application_health_state_chunks', None) -class EntityHealthStateChunkList(Model): - """A base type for the list of health state chunks found in the cluster. It - contains the total number of health states that match the input filters. +class EntityHealthStateChunkList(msrest.serialization.Model): + """A base type for the list of health state chunks found in the cluster. It contains the total number of health states that match the input filters. - :param total_count: Total number of entity health state objects that match - the specified filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match the specified + filters from the cluster health chunk query description. :type total_count: long """ @@ -1357,21 +1493,22 @@ class EntityHealthStateChunkList(Model): 'total_count': {'key': 'TotalCount', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EntityHealthStateChunkList, self).__init__(**kwargs) self.total_count = kwargs.get('total_count', None) class ApplicationHealthStateChunkList(EntityHealthStateChunkList): - """The list of application health state chunks in the cluster that respect the - input filters in the chunk query. 
Returned by get cluster health state - chunks query. + """The list of application health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param total_count: Total number of entity health state objects that match - the specified filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match the specified + filters from the cluster health chunk query description. :type total_count: long - :param items: The list of application health state chunks that respect the - input filters in the chunk query. + :param items: The list of application health state chunks that respect the input filters in the + chunk query. :type items: list[~azure.servicefabric.models.ApplicationHealthStateChunk] """ @@ -1380,87 +1517,78 @@ class ApplicationHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[ApplicationHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class ApplicationHealthStateFilter(Model): - """Defines matching criteria to determine whether a application should be - included in the cluster health chunk. - One filter can match zero, one or multiple applications, depending on its - properties. +class ApplicationHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a application should be included in the cluster health chunk. +One filter can match zero, one or multiple applications, depending on its properties. - :param application_name_filter: The name of the application that matches - the filter, as a fabric uri. The filter is applied only to the specified - application, if it exists. 
- If the application doesn't exist, no application is returned in the + :param application_name_filter: The name of the application that matches the filter, as a + fabric uri. The filter is applied only to the specified application, if it exists. + If the application doesn't exist, no application is returned in the cluster health chunk based + on this filter. + If the application exists, it is included in the cluster health chunk if it respects the other + filter properties. + If not specified, all applications are matched against the other filter members, like health + state filter. + :type application_name_filter: str + :param application_type_name_filter: The name of the application type that matches the filter. + If specified, the filter is applied only to applications of the selected application type, if + any exists. + If no applications of the specified application type exists, no application is returned in the cluster health chunk based on this filter. - If the application exists, it is included in the cluster health chunk if + Each application of the specified application type is included in the cluster health chunk if it respects the other filter properties. - If not specified, all applications are matched against the other filter - members, like health state filter. - :type application_name_filter: str - :param application_type_name_filter: The name of the application type that - matches the filter. - If specified, the filter is applied only to applications of the selected - application type, if any exists. - If no applications of the specified application type exists, no - application is returned in the cluster health chunk based on this filter. - Each application of the specified application type is included in the - cluster health chunk if it respects the other filter properties. - If not specified, all applications are matched against the other filter - members, like health state filter. 
+ If not specified, all applications are matched against the other filter members, like health + state filter. :type application_type_name_filter: str - :param health_state_filter: The filter for the health state of the - applications. It allows selecting applications if they match the desired - health states. - The possible values are integer value of one of the following health - states. Only applications that match the filter are returned. All - applications are used to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the application name or - the application type name are specified. If the filter has default value - and application name is specified, the matching application is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches applications with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the applications. It allows + selecting applications if they match the desired health states. + The possible values are integer value of one of the following health states. Only applications + that match the filter are returned. All applications are used to evaluate the cluster + aggregated health state. 
+ If not specified, default value is None, unless the application name or the application type + name are specified. If the filter has default value and application name is specified, the + matching application is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches applications with HealthState value of OK + (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param service_filters: Defines a list of filters that specify which - services to be included in the returned cluster health chunk as children - of the application. The services are returned only if the parent - application matches a filter. - If the list is empty, no services are returned. All the services are used - to evaluate the parent application aggregated health state, regardless of - the input filters. + :param service_filters: Defines a list of filters that specify which services to be included in + the returned cluster health chunk as children of the application. The services are returned + only if the parent application matches a filter. + If the list is empty, no services are returned. All the services are used to evaluate the + parent application aggregated health state, regardless of the input filters. The application filter may specify multiple service filters. 
- For example, it can specify a filter to return all services with health - state Error and another filter to always include a service identified by - its service name. - :type service_filters: - list[~azure.servicefabric.models.ServiceHealthStateFilter] - :param deployed_application_filters: Defines a list of filters that - specify which deployed applications to be included in the returned cluster - health chunk as children of the application. The deployed applications are - returned only if the parent application matches a filter. - If the list is empty, no deployed applications are returned. All the - deployed applications are used to evaluate the parent application - aggregated health state, regardless of the input filters. + For example, it can specify a filter to return all services with health state Error and + another filter to always include a service identified by its service name. + :type service_filters: list[~azure.servicefabric.models.ServiceHealthStateFilter] + :param deployed_application_filters: Defines a list of filters that specify which deployed + applications to be included in the returned cluster health chunk as children of the + application. The deployed applications are returned only if the parent application matches a + filter. + If the list is empty, no deployed applications are returned. All the deployed applications are + used to evaluate the parent application aggregated health state, regardless of the input + filters. The application filter may specify multiple deployed application filters. - For example, it can specify a filter to return all deployed applications - with health state Error and another filter to always include a deployed - application on a specified node. + For example, it can specify a filter to return all deployed applications with health state + Error and another filter to always include a deployed application on a specified node. 
:type deployed_application_filters: list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter] """ @@ -1473,7 +1601,10 @@ class ApplicationHealthStateFilter(Model): 'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationHealthStateFilter, self).__init__(**kwargs) self.application_name_filter = kwargs.get('application_name_filter', None) self.application_type_name_filter = kwargs.get('application_type_name_filter', None) @@ -1482,41 +1613,38 @@ def __init__(self, **kwargs): self.deployed_application_filters = kwargs.get('deployed_application_filters', None) -class ApplicationInfo(Model): +class ApplicationInfo(msrest.serialization.Model): """Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. 
+ :param type_name: The application type name as defined in the application manifest. :type type_name: str - :param type_version: The version of the application type as defined in the - application manifest. + :param type_version: The version of the application type as defined in the application + manifest. :type type_version: str - :param status: The status of the application. Possible values include: - 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :param status: The status of the application. Possible values include: "Invalid", "Ready", + "Upgrading", "Creating", "Deleting", "Failed". :type status: str or ~azure.servicefabric.models.ApplicationStatus - :param parameters: List of application parameters with overridden values - from their default values specified in the application manifest. + :param parameters: List of application parameters with overridden values from their default + values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param application_definition_kind: The mechanism used to define a Service - Fabric application. Possible values include: 'Invalid', - 'ServiceFabricApplicationDescription', 'Compose' - :type application_definition_kind: str or - ~azure.servicefabric.models.ApplicationDefinitionKind + :param application_definition_kind: The mechanism used to define a Service Fabric application. 
+ Possible values include: "Invalid", "ServiceFabricApplicationDescription", "Compose". + :type application_definition_kind: str or ~azure.servicefabric.models.ApplicationDefinitionKind + :param managed_application_identity: Managed application identity description. + :type managed_application_identity: + ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ _attribute_map = { @@ -1528,9 +1656,13 @@ class ApplicationInfo(Model): 'parameters': {'key': 'Parameters', 'type': '[ApplicationParameter]'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, + 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -1540,39 +1672,31 @@ def __init__(self, **kwargs): self.parameters = kwargs.get('parameters', None) self.health_state = kwargs.get('health_state', None) self.application_definition_kind = kwargs.get('application_definition_kind', None) + self.managed_application_identity = kwargs.get('managed_application_identity', None) -class ApplicationLoadInfo(Model): +class ApplicationLoadInfo(msrest.serialization.Model): """Load Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. 
This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str :param minimum_nodes: The minimum number of nodes for this application. - It is the number of nodes where Service Fabric will reserve Capacity in - the cluster which equals to ReservedLoad * MinimumNodes for this - Application instance. - For applications that do not have application capacity defined this value - will be zero. + It is the number of nodes where Service Fabric will reserve Capacity in the cluster which + equals to ReservedLoad * MinimumNodes for this Application instance. + For applications that do not have application capacity defined this value will be zero. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where this application - can be instantiated. + :param maximum_nodes: The maximum number of nodes where this application can be instantiated. It is the number of nodes this application is allowed to span. - For applications that do not have application capacity defined this value - will be zero. + For applications that do not have application capacity defined this value will be zero. :type maximum_nodes: long - :param node_count: The number of nodes on which this application is - instantiated. - For applications that do not have application capacity defined this value - will be zero. + :param node_count: The number of nodes on which this application is instantiated. + For applications that do not have application capacity defined this value will be zero. :type node_count: long - :param application_load_metric_information: List of application load - metric information. + :param application_load_metric_information: List of application load metric information. 
:type application_load_metric_information: list[~azure.servicefabric.models.ApplicationLoadMetricInformation] """ @@ -1585,7 +1709,10 @@ class ApplicationLoadInfo(Model): 'application_load_metric_information': {'key': 'ApplicationLoadMetricInformation', 'type': '[ApplicationLoadMetricInformation]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationLoadInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.minimum_nodes = kwargs.get('minimum_nodes', None) @@ -1594,26 +1721,20 @@ def __init__(self, **kwargs): self.application_load_metric_information = kwargs.get('application_load_metric_information', None) -class ApplicationLoadMetricInformation(Model): - """Describes load information for a custom resource balancing metric. This can - be used to limit the total consumption of this metric by the services of - this application. +class ApplicationLoadMetricInformation(msrest.serialization.Model): + """Describes load information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. :param name: The name of the metric. :type name: str - :param reservation_capacity: This is the capacity reserved in the cluster - for the application. + :param reservation_capacity: This is the capacity reserved in the cluster for the application. It's the product of NodeReservationCapacity and MinimumNodes. If set to zero, no capacity is reserved for this metric. - When setting application capacity or when updating application capacity - this value must be smaller than or equal to MaximumCapacity for each - metric. + When setting application capacity or when updating application capacity this value must be + smaller than or equal to MaximumCapacity for each metric. :type reservation_capacity: long - :param application_capacity: Total capacity for this metric in this - application instance. 
+ :param application_capacity: Total capacity for this metric in this application instance. :type application_capacity: long - :param application_load: Current load for this metric in this application - instance. + :param application_load: Current load for this metric in this application instance. :type application_load: long """ @@ -1624,7 +1745,10 @@ class ApplicationLoadMetricInformation(Model): 'application_load': {'key': 'ApplicationLoad', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationLoadMetricInformation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.reservation_capacity = kwargs.get('reservation_capacity', None) @@ -1632,46 +1756,35 @@ def __init__(self, **kwargs): self.application_load = kwargs.get('application_load', None) -class ApplicationMetricDescription(Model): - """Describes capacity information for a custom resource balancing metric. This - can be used to limit the total consumption of this metric by the services - of this application. +class ApplicationMetricDescription(msrest.serialization.Model): + """Describes capacity information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. :param name: The name of the metric. :type name: str - :param maximum_capacity: The maximum node capacity for Service Fabric - application. - This is the maximum Load for an instance of this application on a single - node. Even if the capacity of node is greater than this value, Service - Fabric will limit the total load of services within the application on - each node to this value. + :param maximum_capacity: The maximum node capacity for Service Fabric application. + This is the maximum Load for an instance of this application on a single node. 
Even if the + capacity of node is greater than this value, Service Fabric will limit the total load of + services within the application on each node to this value. If set to zero, capacity for this metric is unlimited on each node. - When creating a new application with application capacity defined, the - product of MaximumNodes and this value must always be smaller than or - equal to TotalApplicationCapacity. - When updating existing application with application capacity, the product - of MaximumNodes and this value must always be smaller than or equal to - TotalApplicationCapacity. + When creating a new application with application capacity defined, the product of MaximumNodes + and this value must always be smaller than or equal to TotalApplicationCapacity. + When updating existing application with application capacity, the product of MaximumNodes and + this value must always be smaller than or equal to TotalApplicationCapacity. :type maximum_capacity: long - :param reservation_capacity: The node reservation capacity for Service - Fabric application. - This is the amount of load which is reserved on nodes which have instances - of this application. - If MinimumNodes is specified, then the product of these values will be the - capacity reserved in the cluster for the application. + :param reservation_capacity: The node reservation capacity for Service Fabric application. + This is the amount of load which is reserved on nodes which have instances of this + application. + If MinimumNodes is specified, then the product of these values will be the capacity reserved + in the cluster for the application. If set to zero, no capacity is reserved for this metric. - When setting application capacity or when updating application capacity; - this value must be smaller than or equal to MaximumCapacity for each - metric. + When setting application capacity or when updating application capacity; this value must be + smaller than or equal to MaximumCapacity for each metric. 
:type reservation_capacity: long - :param total_application_capacity: The total metric capacity for Service - Fabric application. - This is the total metric capacity for this application in the cluster. - Service Fabric will try to limit the sum of loads of services within the - application to this value. - When creating a new application with application capacity defined, the - product of MaximumNodes and MaximumCapacity must always be smaller than or - equal to this value. + :param total_application_capacity: The total metric capacity for Service Fabric application. + This is the total metric capacity for this application in the cluster. Service Fabric will try + to limit the sum of loads of services within the application to this value. + When creating a new application with application capacity defined, the product of MaximumNodes + and MaximumCapacity must always be smaller than or equal to this value. :type total_application_capacity: long """ @@ -1682,7 +1795,10 @@ class ApplicationMetricDescription(Model): 'total_application_capacity': {'key': 'TotalApplicationCapacity', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationMetricDescription, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.maximum_capacity = kwargs.get('maximum_capacity', None) @@ -1690,19 +1806,16 @@ def __init__(self, **kwargs): self.total_application_capacity = kwargs.get('total_application_capacity', None) -class ApplicationNameInfo(Model): +class ApplicationNameInfo(msrest.serialization.Model): """Information about the application name. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. 
For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str """ @@ -1711,7 +1824,10 @@ class ApplicationNameInfo(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationNameInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -1722,25 +1838,44 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1756,17 +1891,16 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1780,11 +1914,11 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1797,23 +1931,25 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationNewHealthReportEvent, self).__init__(**kwargs) - self.application_instance_id = kwargs.get('application_instance_id', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'ApplicationNewHealthReport' + self.kind = 'ApplicationNewHealthReport' # type: str + self.application_instance_id = kwargs['application_instance_id'] + self.source_id = 
kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] -class ApplicationParameter(Model): - """Describes an application parameter override to be applied when creating or - upgrading an application. +class ApplicationParameter(msrest.serialization.Model): + """Describes an application parameter override to be applied when creating or upgrading an application. All required parameters must be populated in order to send to Azure. @@ -1833,10 +1969,13 @@ class ApplicationParameter(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationParameter, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) + self.key = kwargs['key'] + self.value = kwargs['value'] class ApplicationProcessExitedEvent(ApplicationEvent): @@ -1844,32 +1983,50 @@ class ApplicationProcessExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service - package. + :param service_package_activation_id: Required. Activation Id of Service package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. :type is_exclusive: bool @@ -1885,17 +2042,16 @@ class ApplicationProcessExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. 
:type exit_code: long - :param unexpected_termination: Required. Indicates if termination is - unexpected. + :param unexpected_termination: Required. Indicates if termination is unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: datetime + :type start_time: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -1912,11 +2068,11 @@ class ApplicationProcessExitedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -1932,64 +2088,62 @@ class ApplicationProcessExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationProcessExitedEvent, self).__init__(**kwargs) - self.service_name = kwargs.get('service_name', None) - self.service_package_name = kwargs.get('service_package_name', None) - self.service_package_activation_id = kwargs.get('service_package_activation_id', None) - self.is_exclusive = kwargs.get('is_exclusive', None) - self.code_package_name = kwargs.get('code_package_name', None) - self.entry_point_type = kwargs.get('entry_point_type', None) - self.exe_name = kwargs.get('exe_name', None) - self.process_id = kwargs.get('process_id', None) - 
self.host_id = kwargs.get('host_id', None) - self.exit_code = kwargs.get('exit_code', None) - self.unexpected_termination = kwargs.get('unexpected_termination', None) - self.start_time = kwargs.get('start_time', None) - self.kind = 'ApplicationProcessExited' - - -class ApplicationResourceDescription(Model): + self.kind = 'ApplicationProcessExited' # type: str + self.service_name = kwargs['service_name'] + self.service_package_name = kwargs['service_package_name'] + self.service_package_activation_id = kwargs['service_package_activation_id'] + self.is_exclusive = kwargs['is_exclusive'] + self.code_package_name = kwargs['code_package_name'] + self.entry_point_type = kwargs['entry_point_type'] + self.exe_name = kwargs['exe_name'] + self.process_id = kwargs['process_id'] + self.host_id = kwargs['host_id'] + self.exit_code = kwargs['exit_code'] + self.unexpected_termination = kwargs['unexpected_termination'] + self.start_time = kwargs['start_time'] + + +class ApplicationResourceDescription(msrest.serialization.Model): """This type describes a application resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the Application resource. :type name: str + :param identity: Describes the identity of the application. + :type identity: ~azure.servicefabric.models.IdentityDescription :param description: User readable description of the application. :type description: str - :param services: Describes the services in the application. This property - is used to create or modify services of the application. On get only the - name of the service is returned. The service description can be obtained - by querying for the service resource. 
- :type services: - list[~azure.servicefabric.models.ServiceResourceDescription] - :param diagnostics: Describes the diagnostics definition and usage for an - application resource. + :param services: Describes the services in the application. This property is used to create or + modify services of the application. On get only the name of the service is returned. The + service description can be obtained by querying for the service resource. + :type services: list[~azure.servicefabric.models.ServiceResourceDescription] + :param diagnostics: Describes the diagnostics definition and usage for an application resource. :type diagnostics: ~azure.servicefabric.models.DiagnosticsDescription - :param debug_params: Internal - used by Visual Studio to setup the - debugging session on the local development environment. + :param debug_params: Internal - used by Visual Studio to setup the debugging session on the + local development environment. :type debug_params: str :ivar service_names: Names of the services in the application. :vartype service_names: list[str] - :ivar status: Status of the application. Possible values include: - 'Unknown', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the application. Possible values include: "Unknown", "Ready", + "Upgrading", "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the application. + :ivar status_details: Gives additional information about the current status of the application. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". 
:vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the application's health state is not - 'Ok', this additional details from service fabric Health Manager for the - user to know why the application is marked unhealthy. + :ivar unhealthy_evaluation: When the application's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the application is marked + unhealthy. :vartype unhealthy_evaluation: str - :param identity: Describes the identity of the application. - :type identity: ~azure.servicefabric.models.IdentityDescription """ _validation = { @@ -2003,6 +2157,7 @@ class ApplicationResourceDescription(Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, + 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'services': {'key': 'properties.services', 'type': '[ServiceResourceDescription]'}, 'diagnostics': {'key': 'properties.diagnostics', 'type': 'DiagnosticsDescription'}, @@ -2012,12 +2167,15 @@ class ApplicationResourceDescription(Model): 'status_details': {'key': 'properties.statusDetails', 'type': 'str'}, 'health_state': {'key': 'properties.healthState', 'type': 'str'}, 'unhealthy_evaluation': {'key': 'properties.unhealthyEvaluation', 'type': 'str'}, - 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationResourceDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] + self.identity = kwargs.get('identity', None) self.description = kwargs.get('description', None) self.services = kwargs.get('services', None) self.diagnostics = kwargs.get('diagnostics', None) @@ -2027,56 +2185,45 @@ def __init__(self, **kwargs): self.status_details = None self.health_state = None self.unhealthy_evaluation = None - self.identity = 
kwargs.get('identity', None) -class ApplicationResourceUpgradeProgressInfo(Model): +class ApplicationResourceUpgradeProgressInfo(msrest.serialization.Model): """This type describes an application resource upgrade. :param name: Name of the Application resource. :type name: str - :param target_application_type_version: The target application version for - the application upgrade. + :param target_application_type_version: The target application version for the application + upgrade. :type target_application_type_version: str - :param start_timestamp_utc: The estimated UTC datetime when the upgrade - started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. :type start_timestamp_utc: str - :param upgrade_state: The state of the application resource upgrade. - Possible values include: 'Invalid', 'ProvisioningTarget', - 'RollingForward', 'UnprovisioningCurrent', 'CompletedRollforward', - 'RollingBack', 'UnprovisioningTarget', 'CompletedRollback', 'Failed' - :type upgrade_state: str or - ~azure.servicefabric.models.ApplicationResourceUpgradeState - :param percent_completed: The estimated percent of replicas are completed - in the upgrade. + :param upgrade_state: The state of the application resource upgrade. Possible values include: + "Invalid", "ProvisioningTarget", "RollingForward", "UnprovisioningCurrent", + "CompletedRollforward", "RollingBack", "UnprovisioningTarget", "CompletedRollback", "Failed". + :type upgrade_state: str or ~azure.servicefabric.models.ApplicationResourceUpgradeState + :param percent_completed: The estimated percent of replicas are completed in the upgrade. :type percent_completed: str :param service_upgrade_progress: List of service upgrade progresses. - :type service_upgrade_progress: - list[~azure.servicefabric.models.ServiceUpgradeProgress] - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. 
Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "Monitored" . - :type rolling_upgrade_mode: str or - ~azure.servicefabric.models.RollingUpgradeMode - :param upgrade_duration: The estimated amount of time that the overall - upgrade elapsed. It is first interpreted as a string representing an ISO - 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. Default value: "PT0H2M0S" . + :type service_upgrade_progress: list[~azure.servicefabric.models.ServiceUpgradeProgress] + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: "Monitored". + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.RollingUpgradeMode + :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type upgrade_duration: str - :param application_upgrade_status_details: Additional detailed information - about the status of the pending upgrade. + :param application_upgrade_status_details: Additional detailed information about the status of + the pending upgrade. :type application_upgrade_status_details: str - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). 
Default value: 42949672925 . + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade - failed and FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and + FailureAction was executed. :type failure_timestamp_utc: str """ @@ -2094,7 +2241,10 @@ class ApplicationResourceUpgradeProgressInfo(Model): 'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationResourceUpgradeProgressInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.target_application_type_version = kwargs.get('target_application_type_version', None) @@ -2109,18 +2259,17 @@ def __init__(self, **kwargs): self.failure_timestamp_utc = kwargs.get('failure_timestamp_utc', None) -class VolumeReference(Model): +class VolumeReference(msrest.serialization.Model): """Describes a reference to a volume resource. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. - Default is 'false'. + :param read_only: The flag indicating whether the volume is read only. Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which - the volume should be mounted. Only valid path characters are allowed. 
+ :param destination_path: Required. The path within the container at which the volume should be + mounted. Only valid path characters are allowed. :type destination_path: str """ @@ -2135,11 +2284,14 @@ class VolumeReference(Model): 'destination_path': {'key': 'destinationPath', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(VolumeReference, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.read_only = kwargs.get('read_only', None) - self.destination_path = kwargs.get('destination_path', None) + self.destination_path = kwargs['destination_path'] class ApplicationScopedVolume(VolumeReference): @@ -2149,14 +2301,13 @@ class ApplicationScopedVolume(VolumeReference): :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. - Default is 'false'. + :param read_only: The flag indicating whether the volume is read only. Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which - the volume should be mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which the volume should be + mounted. Only valid path characters are allowed. :type destination_path: str - :param creation_parameters: Required. Describes parameters for creating - application-scoped volumes. + :param creation_parameters: Required. Describes parameters for creating application-scoped + volumes. 
:type creation_parameters: ~azure.servicefabric.models.ApplicationScopedVolumeCreationParameters """ @@ -2174,24 +2325,27 @@ class ApplicationScopedVolume(VolumeReference): 'creation_parameters': {'key': 'creationParameters', 'type': 'ApplicationScopedVolumeCreationParameters'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationScopedVolume, self).__init__(**kwargs) - self.creation_parameters = kwargs.get('creation_parameters', None) + self.creation_parameters = kwargs['creation_parameters'] -class ApplicationScopedVolumeCreationParameters(Model): +class ApplicationScopedVolumeCreationParameters(msrest.serialization.Model): """Describes parameters for creating application-scoped volumes. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: - ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk + sub-classes are: ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk. All required parameters must be populated in order to send to Azure. + :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. + Possible values include: "ServiceFabricVolumeDisk". + :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -2199,32 +2353,34 @@ class ApplicationScopedVolumeCreationParameters(Model): } _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, } _subtype_map = { 'kind': {'ServiceFabricVolumeDisk': 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationScopedVolumeCreationParameters, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.description = kwargs.get('description', None) - self.kind = None class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(ApplicationScopedVolumeCreationParameters): - """Describes parameters for creating application-scoped volumes provided by - Service Fabric Volume Disks. + """Describes parameters for creating application-scoped volumes provided by Service Fabric Volume Disks. All required parameters must be populated in order to send to Azure. + :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. + Possible values include: "ServiceFabricVolumeDisk". + :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param size_disk: Required. Volume size. Possible values include: 'Small', - 'Medium', 'Large' + :param size_disk: Required. Volume size. Possible values include: "Small", "Medium", "Large". 
:type size_disk: str or ~azure.servicefabric.models.SizeTypes """ @@ -2234,45 +2390,51 @@ class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(Applicati } _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, 'size_disk': {'key': 'sizeDisk', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk, self).__init__(**kwargs) - self.size_disk = kwargs.get('size_disk', None) - self.kind = 'ServiceFabricVolumeDisk' + self.kind = 'ServiceFabricVolumeDisk' # type: str + self.size_disk = kwargs['size_disk'] class ApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications, containing health - evaluations for each unhealthy application that impacted current aggregated - health state. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for applications, containing health evaluations for each unhealthy application that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of - unhealthy applications from the ClusterHealthPolicy. + :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications + from the ClusterHealthPolicy. :type max_percent_unhealthy_applications: int :param total_count: Total number of applications from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ApplicationHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy ApplicationHealthEvaluation that impacted the aggregated + health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2280,59 +2442,59 @@ class ApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationsHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Applications' # type: str self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Applications' class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications of a particular application - type. The application type applications evaluation can be returned when - cluster health evaluation returns unhealthy aggregated health state, either - Error or Warning. It contains health evaluations for each unhealthy - application of the included application type that impacted current - aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for applications of a particular application type. The application type applications evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy application of the included application type that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. 
:type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param application_type_name: The application type name as defined in the - application manifest. + :param application_type_name: The application type name as defined in the application manifest. :type application_type_name: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of - unhealthy applications for the application type, specified as an entry in - ApplicationTypeHealthPolicyMap. + :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications + for the application type, specified as an entry in ApplicationTypeHealthPolicyMap. :type max_percent_unhealthy_applications: int - :param total_count: Total number of applications of the application type - found in the health store. + :param total_count: Total number of applications of the application type found in the health + store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ApplicationHealthEvaluation of this application type that impacted the - aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy ApplicationHealthEvaluation of this application type that + impacted the aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2340,36 +2502,38 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(**kwargs) + self.kind = 'ApplicationTypeApplications' # type: str self.application_type_name = kwargs.get('application_type_name', None) self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'ApplicationTypeApplications' -class ApplicationTypeHealthPolicyMapItem(Model): +class ApplicationTypeHealthPolicyMapItem(msrest.serialization.Model): """Defines an item in ApplicationTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application type health policy map - item. This is the name of the application type. + :param key: Required. The key of the application type health policy map item. This is the name + of the application type. :type key: str - :param value: Required. The value of the application type health policy - map item. - The max percent unhealthy applications allowed for the application type. - Must be between zero and 100. 
+ :param value: Required. The value of the application type health policy map item. + The max percent unhealthy applications allowed for the application type. Must be between zero + and 100. :type value: int """ @@ -2383,20 +2547,22 @@ class ApplicationTypeHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationTypeHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) + self.key = kwargs['key'] + self.value = kwargs['value'] -class ApplicationTypeImageStorePath(Model): - """Path description for the application package in the image store specified - during the prior copy operation. +class ApplicationTypeImageStorePath(msrest.serialization.Model): + """Path description for the application package in the image store specified during the prior copy operation. All required parameters must be populated in order to send to Azure. - :param application_type_build_path: Required. The relative image store - path to the application package. + :param application_type_build_path: Required. The relative image store path to the application + package. :type application_type_build_path: str """ @@ -2408,34 +2574,33 @@ class ApplicationTypeImageStorePath(Model): 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationTypeImageStorePath, self).__init__(**kwargs) - self.application_type_build_path = kwargs.get('application_type_build_path', None) + self.application_type_build_path = kwargs['application_type_build_path'] -class ApplicationTypeInfo(Model): +class ApplicationTypeInfo(msrest.serialization.Model): """Information about an application type. - :param name: The application type name as defined in the application - manifest. 
+ :param name: The application type name as defined in the application manifest. :type name: str - :param version: The version of the application type as defined in the - application manifest. + :param version: The version of the application type as defined in the application manifest. :type version: str - :param default_parameter_list: List of application type parameters that - can be overridden when creating or updating the application. - :type default_parameter_list: - list[~azure.servicefabric.models.ApplicationParameter] - :param status: The status of the application type. Possible values - include: 'Invalid', 'Provisioning', 'Available', 'Unprovisioning', - 'Failed' + :param default_parameter_list: List of application type parameters that can be overridden when + creating or updating the application. + :type default_parameter_list: list[~azure.servicefabric.models.ApplicationParameter] + :param status: The status of the application type. Possible values include: "Invalid", + "Provisioning", "Available", "Unprovisioning", "Failed". :type status: str or ~azure.servicefabric.models.ApplicationTypeStatus - :param status_details: Additional detailed information about the status of - the application type. + :param status_details: Additional detailed information about the status of the application + type. :type status_details: str - :param application_type_definition_kind: The mechanism used to define a - Service Fabric application type. Possible values include: 'Invalid', - 'ServiceFabricApplicationPackage', 'Compose' + :param application_type_definition_kind: The mechanism used to define a Service Fabric + application type. Possible values include: "Invalid", "ServiceFabricApplicationPackage", + "Compose". 
:type application_type_definition_kind: str or ~azure.servicefabric.models.ApplicationTypeDefinitionKind """ @@ -2449,7 +2614,10 @@ class ApplicationTypeInfo(Model): 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationTypeInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.version = kwargs.get('version', None) @@ -2459,9 +2627,8 @@ def __init__(self, **kwargs): self.application_type_definition_kind = kwargs.get('application_type_definition_kind', None) -class ApplicationTypeManifest(Model): - """Contains the manifest describing an application type registered in a - Service Fabric cluster. +class ApplicationTypeManifest(msrest.serialization.Model): + """Contains the manifest describing an application type registered in a Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -2471,7 +2638,10 @@ class ApplicationTypeManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationTypeManifest, self).__init__(**kwargs) self.manifest = kwargs.get('manifest', None) @@ -2481,39 +2651,57 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str :param application_type_version: Required. Application type version. :type application_type_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time - in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -2521,94 +2709,80 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeCompletedEvent, self).__init__(**kwargs) - self.application_type_name = kwargs.get('application_type_name', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) - self.kind = 'ApplicationUpgradeCompleted' + self.kind = 'ApplicationUpgradeCompleted' # type: str + self.application_type_name = kwargs['application_type_name'] + self.application_type_version = kwargs['application_type_version'] + self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] -class ApplicationUpgradeDescription(Model): - """Describes the parameters for an application upgrade. Note that upgrade - description replaces the existing application description. 
This means that - if the parameters are not specified, the existing parameters on the - applications will be overwritten with the empty parameters list. This would - result in the application using the default value of the parameters from - the application manifest. If you do not want to change any existing - parameter values, please get the application parameters first using the - GetApplicationInfo query and then supply those values as Parameters in this - ApplicationUpgradeDescription. +class ApplicationUpgradeDescription(msrest.serialization.Model): + """Describes the parameters for an application upgrade. Note that upgrade description replaces the existing application description. This means that if the parameters are not specified, the existing parameters on the applications will be overwritten with the empty parameters list. This would result in the application using the default value of the parameters from the application manifest. If you do not want to change any existing parameter values, please get the application parameters first using the GetApplicationInfo query and then supply those values as Parameters in this ApplicationUpgradeDescription. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the target application, including the - 'fabric:' URI scheme. + :param name: Required. The name of the target application, including the 'fabric:' URI scheme. :type name: str - :param target_application_type_version: Required. The target application - type version (found in the application manifest) for the application - upgrade. + :param target_application_type_version: Required. The target application type version (found in + the application manifest) for the application upgrade. :type target_application_type_version: str - :param parameters: List of application parameters with overridden values - from their default values specified in the application manifest. 
+ :param parameters: List of application parameters with overridden values from their default + values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param upgrade_kind: Required. The kind of upgrade out of the following - possible values. Possible values include: 'Invalid', 'Rolling'. Default - value: "Rolling" . + :param upgrade_kind: Required. The kind of upgrade out of the following possible values. + Possible values include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. 
When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through - the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', - 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default - value: "Default" . + :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible + values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", + "ReverseLexicographical". Default value: "Default". :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param instance_close_delay_duration_in_seconds: Duration in seconds, to - wait before a stateless instance is closed, to allow the active requests - to drain gracefully. 
This would be effective when the instance is closing - during the application/cluster - upgrade, only for those instances which have a non-zero delay duration - configured in the service description. See - InstanceCloseDelayDurationSeconds property in $ref: + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a + stateless instance is closed, to allow the active requests to drain gracefully. This would be + effective when the instance is closing during the application/cluster + upgrade, only for those instances which have a non-zero delay duration configured in the + service description. See InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is - 4294967295, which indicates that the behavior will entirely depend on the - delay configured in the stateless service description. + Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates + that the behavior will entirely depend on the delay configured in the stateless service + description. :type instance_close_delay_duration_in_seconds: long + :param managed_application_identity: Managed application identity description. 
+ :type managed_application_identity: + ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ _validation = { @@ -2629,21 +2803,26 @@ class ApplicationUpgradeDescription(Model): 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, + 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.target_application_type_version = kwargs.get('target_application_type_version', None) + self.name = kwargs['name'] + self.target_application_type_version = kwargs['target_application_type_version'] self.parameters = kwargs.get('parameters', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) - self.force_restart = kwargs.get('force_restart', None) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) + self.force_restart = kwargs.get('force_restart', False) self.sort_order = kwargs.get('sort_order', "Default") self.monitoring_policy = kwargs.get('monitoring_policy', None) self.application_health_policy = kwargs.get('application_health_policy', None) - self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', None) + self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', 4294967295) + 
self.managed_application_identity = kwargs.get('managed_application_identity', None) class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): @@ -2651,47 +2830,63 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", 
"ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application - type version. 
+ :param current_application_type_version: Required. Current Application type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type - version. + :param application_type_version: Required. Target Application type version. :type application_type_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain - in milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain in milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -2702,11 +2897,11 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -2716,86 +2911,79 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeDomainCompletedEvent, self).__init__(**kwargs) - 
self.application_type_name = kwargs.get('application_type_name', None) - self.current_application_type_version = kwargs.get('current_application_type_version', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.upgrade_state = kwargs.get('upgrade_state', None) - self.upgrade_domains = kwargs.get('upgrade_domains', None) - self.upgrade_domain_elapsed_time_in_ms = kwargs.get('upgrade_domain_elapsed_time_in_ms', None) - self.kind = 'ApplicationUpgradeDomainCompleted' + self.kind = 'ApplicationUpgradeDomainCompleted' # type: str + self.application_type_name = kwargs['application_type_name'] + self.current_application_type_version = kwargs['current_application_type_version'] + self.application_type_version = kwargs['application_type_version'] + self.upgrade_state = kwargs['upgrade_state'] + self.upgrade_domains = kwargs['upgrade_domains'] + self.upgrade_domain_elapsed_time_in_ms = kwargs['upgrade_domain_elapsed_time_in_ms'] -class ApplicationUpgradeProgressInfo(Model): +class ApplicationUpgradeProgressInfo(msrest.serialization.Model): """Describes the parameters for an application upgrade. - :param name: The name of the target application, including the 'fabric:' - URI scheme. + :param name: The name of the target application, including the 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. + :param type_name: The application type name as defined in the application manifest. :type type_name: str - :param target_application_type_version: The target application type - version (found in the application manifest) for the application upgrade. + :param target_application_type_version: The target application type version (found in the + application manifest) for the application upgrade. :type target_application_type_version: str :param upgrade_domains: List of upgrade domains and their statuses. 
:type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values - include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', - 'RollingForwardPending', 'RollingForwardInProgress', - 'RollingForwardCompleted', 'Failed' + :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", + "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", + "RollingForwardInProgress", "RollingForwardCompleted", "Failed". :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be - processed. + :param next_upgrade_domain: The name of the next upgrade domain to be processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Describes the parameters for an application - upgrade. Note that upgrade description replaces the existing application - description. This means that if the parameters are not specified, the - existing parameters on the applications will be overwritten with the empty - parameters list. This would result in the application using the default - value of the parameters from the application manifest. 
If you do not want - to change any existing parameter values, please get the application - parameters first using the GetApplicationInfo query and then supply those - values as Parameters in this ApplicationUpgradeDescription. - :type upgrade_description: - ~azure.servicefabric.models.ApplicationUpgradeDescription - :param upgrade_duration_in_milliseconds: The estimated total amount of - time spent processing the overall upgrade. + :param upgrade_description: Describes the parameters for an application upgrade. Note that + upgrade description replaces the existing application description. This means that if the + parameters are not specified, the existing parameters on the applications will be overwritten + with the empty parameters list. This would result in the application using the default value of + the parameters from the application manifest. If you do not want to change any existing + parameter values, please get the application parameters first using the GetApplicationInfo + query and then supply those values as Parameters in this ApplicationUpgradeDescription. + :type upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription + :param upgrade_duration_in_milliseconds: The estimated total amount of time spent processing + the overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated total amount - of time spent processing the current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated total amount of time spent + processing the current upgrade domain. :type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in - the current aggregated health state. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current - in-progress upgrade domain. 
+ :param unhealthy_evaluations: List of health evaluations that resulted in the current + aggregated health state. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current in-progress upgrade + domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade - started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade - failed and FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and + FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in - FailureAction being executed. Possible values include: 'None', - 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', - 'OverallUpgradeTimeout' + :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being + executed. Possible values include: "None", "Interrupted", "HealthCheck", + "UpgradeDomainTimeout", "OverallUpgradeTimeout". :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade - domain progress at the time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the + time of upgrade failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param upgrade_status_details: Additional detailed information about the - status of the pending upgrade. + :param upgrade_status_details: Additional detailed information about the status of the pending + upgrade. 
:type upgrade_status_details: str """ @@ -2819,7 +3007,10 @@ class ApplicationUpgradeProgressInfo(Model): 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type_name = kwargs.get('type_name', None) @@ -2845,25 +3036,44 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str @@ -2871,15 +3081,14 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): :type application_type_version: str :param failure_reason: Required. Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time - in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -2888,11 +3097,11 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, @@ -2900,13 +3109,16 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeRollbackCompletedEvent, self).__init__(**kwargs) 
- self.application_type_name = kwargs.get('application_type_name', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.failure_reason = kwargs.get('failure_reason', None) - self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) - self.kind = 'ApplicationUpgradeRollbackCompleted' + self.kind = 'ApplicationUpgradeRollbackCompleted' # type: str + self.application_type_name = kwargs['application_type_name'] + self.application_type_version = kwargs['application_type_version'] + self.failure_reason = kwargs['failure_reason'] + self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): @@ -2914,45 +3126,61 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application - type version. + :param current_application_type_version: Required. Current Application type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type - version. + :param application_type_version: Required. Target Application type version. :type application_type_version: str :param failure_reason: Required. Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. 
Overall upgrade time - in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -2962,11 +3190,11 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -2975,14 +3203,17 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeRollbackStartedEvent, self).__init__(**kwargs) - self.application_type_name = kwargs.get('application_type_name', None) - self.current_application_type_version = kwargs.get('current_application_type_version', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.failure_reason = kwargs.get('failure_reason', None) - self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) - self.kind = 'ApplicationUpgradeRollbackStarted' + self.kind = 'ApplicationUpgradeRollbackStarted' # type: str + 
self.application_type_name = kwargs['application_type_name'] + self.current_application_type_version = kwargs['current_application_type_version'] + self.application_type_version = kwargs['application_type_version'] + self.failure_reason = kwargs['failure_reason'] + self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] class ApplicationUpgradeStartedEvent(ApplicationEvent): @@ -2990,33 +3221,50 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application - type version. + :param current_application_type_version: Required. Current Application type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type - version. + :param application_type_version: Required. Target Application type version. :type application_type_version: str :param upgrade_type: Required. Type of upgrade. :type upgrade_type: str @@ -3027,9 +3275,9 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3040,11 +3288,11 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3054,37 +3302,36 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + 
): super(ApplicationUpgradeStartedEvent, self).__init__(**kwargs) - self.application_type_name = kwargs.get('application_type_name', None) - self.current_application_type_version = kwargs.get('current_application_type_version', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.upgrade_type = kwargs.get('upgrade_type', None) - self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', None) - self.failure_action = kwargs.get('failure_action', None) - self.kind = 'ApplicationUpgradeStarted' + self.kind = 'ApplicationUpgradeStarted' # type: str + self.application_type_name = kwargs['application_type_name'] + self.current_application_type_version = kwargs['current_application_type_version'] + self.application_type_version = kwargs['application_type_version'] + self.upgrade_type = kwargs['upgrade_type'] + self.rolling_upgrade_mode = kwargs['rolling_upgrade_mode'] + self.failure_action = kwargs['failure_action'] -class ApplicationUpgradeUpdateDescription(Model): +class ApplicationUpgradeUpdateDescription(msrest.serialization.Model): """Describes the parameters for updating an ongoing application upgrade. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the - 'fabric:' URI scheme. + :param name: Required. The name of the application, including the 'fabric:' URI scheme. :type name: str - :param upgrade_kind: Required. The kind of upgrade out of the following - possible values. Possible values include: 'Invalid', 'Rolling'. Default - value: "Rolling" . + :param upgrade_kind: Required. The kind of upgrade out of the following possible values. + Possible values include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. 
- :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param update_description: Describes the parameters for updating a rolling - upgrade of application or cluster. - :type update_description: - ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param update_description: Describes the parameters for updating a rolling upgrade of + application or cluster. + :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription """ _validation = { @@ -3099,25 +3346,28 @@ class ApplicationUpgradeUpdateDescription(Model): 'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ApplicationUpgradeUpdateDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.application_health_policy = kwargs.get('application_health_policy', None) self.update_description = kwargs.get('update_description', None) -class AutoScalingMetric(Model): - """Describes the metric that is used for triggering auto scaling operation. - Derived classes will describe resources or metrics. +class AutoScalingMetric(msrest.serialization.Model): + """Describes the metric that is used for triggering auto scaling operation. Derived classes will describe resources or metrics. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AutoScalingResourceMetric + sub-classes are: AutoScalingResourceMetric. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. 
- :type kind: str + :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible + values include: "Resource". + :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind """ _validation = { @@ -3132,23 +3382,25 @@ class AutoScalingMetric(Model): 'kind': {'Resource': 'AutoScalingResourceMetric'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AutoScalingMetric, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] -class AutoScalingPolicy(Model): +class AutoScalingPolicy(msrest.serialization.Model): """Describes the auto scaling policy. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the auto scaling policy. :type name: str - :param trigger: Required. Determines when auto scaling operation will be - invoked. + :param trigger: Required. Determines when auto scaling operation will be invoked. :type trigger: ~azure.servicefabric.models.AutoScalingTrigger - :param mechanism: Required. The mechanism that is used to scale when auto - scaling operation is invoked. + :param mechanism: Required. The mechanism that is used to scale when auto scaling operation is + invoked. :type mechanism: ~azure.servicefabric.models.AutoScalingMechanism """ @@ -3164,11 +3416,14 @@ class AutoScalingPolicy(Model): 'mechanism': {'key': 'mechanism', 'type': 'AutoScalingMechanism'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AutoScalingPolicy, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.trigger = kwargs.get('trigger', None) - self.mechanism = kwargs.get('mechanism', None) + self.name = kwargs['name'] + self.trigger = kwargs['trigger'] + self.mechanism = kwargs['mechanism'] class AutoScalingResourceMetric(AutoScalingMetric): @@ -3176,12 +3431,11 @@ class AutoScalingResourceMetric(AutoScalingMetric): All required parameters must be populated in order to send to Azure. 
- :param kind: Required. Constant filled by server. - :type kind: str - :param name: Required. Name of the resource. Possible values include: - 'cpu', 'memoryInGB' - :type name: str or - ~azure.servicefabric.models.AutoScalingResourceMetricName + :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible + values include: "Resource". + :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind + :param name: Required. Name of the resource. Possible values include: "cpu", "memoryInGB". + :type name: str or ~azure.servicefabric.models.AutoScalingResourceMetricName """ _validation = { @@ -3194,22 +3448,26 @@ class AutoScalingResourceMetric(AutoScalingMetric): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AutoScalingResourceMetric, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.kind = 'Resource' + self.kind = 'Resource' # type: str + self.name = kwargs['name'] -class AutoScalingTrigger(Model): +class AutoScalingTrigger(msrest.serialization.Model): """Describes the trigger for performing auto scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AverageLoadScalingTrigger + sub-classes are: AverageLoadScalingTrigger. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible + values include: "AverageLoad". 
+ :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind """ _validation = { @@ -3224,9 +3482,12 @@ class AutoScalingTrigger(Model): 'kind': {'AverageLoad': 'AverageLoadScalingTrigger'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AutoScalingTrigger, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AverageLoadScalingTrigger(AutoScalingTrigger): @@ -3234,19 +3495,19 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param metric: Required. Description of the metric that is used for - scaling. + :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible + values include: "AverageLoad". + :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind + :param metric: Required. Description of the metric that is used for scaling. :type metric: ~azure.servicefabric.models.AutoScalingMetric - :param lower_load_threshold: Required. Lower load threshold (if average - load is below this threshold, service will scale down). + :param lower_load_threshold: Required. Lower load threshold (if average load is below this + threshold, service will scale down). :type lower_load_threshold: float - :param upper_load_threshold: Required. Upper load threshold (if average - load is above this threshold, service will scale up). + :param upper_load_threshold: Required. Upper load threshold (if average load is above this + threshold, service will scale up). :type upper_load_threshold: float - :param scale_interval_in_seconds: Required. Scale interval that indicates - how often will this trigger be checked. + :param scale_interval_in_seconds: Required. Scale interval that indicates how often will this + trigger be checked. 
:type scale_interval_in_seconds: int """ @@ -3266,26 +3527,29 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): 'scale_interval_in_seconds': {'key': 'scaleIntervalInSeconds', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AverageLoadScalingTrigger, self).__init__(**kwargs) - self.metric = kwargs.get('metric', None) - self.lower_load_threshold = kwargs.get('lower_load_threshold', None) - self.upper_load_threshold = kwargs.get('upper_load_threshold', None) - self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) - self.kind = 'AverageLoad' + self.kind = 'AverageLoad' # type: str + self.metric = kwargs['metric'] + self.lower_load_threshold = kwargs['lower_load_threshold'] + self.upper_load_threshold = kwargs['upper_load_threshold'] + self.scale_interval_in_seconds = kwargs['scale_interval_in_seconds'] -class ScalingTriggerDescription(Model): +class ScalingTriggerDescription(msrest.serialization.Model): """Describes the trigger for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AveragePartitionLoadScalingTrigger, - AverageServiceLoadScalingTrigger + sub-classes are: AveragePartitionLoadScalingTrigger, AverageServiceLoadScalingTrigger. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. + Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". 
+ :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind """ _validation = { @@ -3300,30 +3564,32 @@ class ScalingTriggerDescription(Model): 'kind': {'AveragePartitionLoad': 'AveragePartitionLoadScalingTrigger', 'AverageServiceLoad': 'AverageServiceLoadScalingTrigger'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ScalingTriggerDescription, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling trigger related to an average load of a - metric/resource of a partition. + """Represents a scaling trigger related to an average load of a metric/resource of a partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param metric_name: Required. The name of the metric for which usage - should be tracked. + :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. + Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". + :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind + :param metric_name: Required. The name of the metric for which usage should be tracked. :type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below - which a scale in operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below which a scale in + operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond - which a scale out operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out + operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. 
The period in seconds on which - a decision is made whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made + whether to scale or not. :type scale_interval_in_seconds: long """ @@ -3343,35 +3609,44 @@ class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AveragePartitionLoadScalingTrigger, self).__init__(**kwargs) - self.metric_name = kwargs.get('metric_name', None) - self.lower_load_threshold = kwargs.get('lower_load_threshold', None) - self.upper_load_threshold = kwargs.get('upper_load_threshold', None) - self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) - self.kind = 'AveragePartitionLoad' + self.kind = 'AveragePartitionLoad' # type: str + self.metric_name = kwargs['metric_name'] + self.lower_load_threshold = kwargs['lower_load_threshold'] + self.upper_load_threshold = kwargs['upper_load_threshold'] + self.scale_interval_in_seconds = kwargs['scale_interval_in_seconds'] class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling policy related to an average load of a metric/resource - of a service. + """Represents a scaling policy related to an average load of a metric/resource of a service. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param metric_name: Required. The name of the metric for which usage - should be tracked. + :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. + Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". + :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind + :param metric_name: Required. The name of the metric for which usage should be tracked. 
:type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below - which a scale in operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below which a scale in + operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond - which a scale out operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out + operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. The period in seconds on which - a decision is made whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made + whether to scale or not. :type scale_interval_in_seconds: long + :param use_only_primary_load: Required. Flag determines whether only the load of primary + replica should be considered for scaling. + If set to true, then trigger will only consider the load of primary replicas of stateful + service. + If set to false, trigger will consider load of all replicas. + This parameter cannot be set to true for stateless service. 
+ :type use_only_primary_load: bool """ _validation = { @@ -3380,6 +3655,7 @@ class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): 'lower_load_threshold': {'required': True}, 'upper_load_threshold': {'required': True}, 'scale_interval_in_seconds': {'required': True, 'maximum': 4294967295, 'minimum': 0}, + 'use_only_primary_load': {'required': True}, } _attribute_map = { @@ -3388,30 +3664,36 @@ class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): 'lower_load_threshold': {'key': 'LowerLoadThreshold', 'type': 'str'}, 'upper_load_threshold': {'key': 'UpperLoadThreshold', 'type': 'str'}, 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, + 'use_only_primary_load': {'key': 'UseOnlyPrimaryLoad', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AverageServiceLoadScalingTrigger, self).__init__(**kwargs) - self.metric_name = kwargs.get('metric_name', None) - self.lower_load_threshold = kwargs.get('lower_load_threshold', None) - self.upper_load_threshold = kwargs.get('upper_load_threshold', None) - self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) - self.kind = 'AverageServiceLoad' + self.kind = 'AverageServiceLoad' # type: str + self.metric_name = kwargs['metric_name'] + self.lower_load_threshold = kwargs['lower_load_threshold'] + self.upper_load_threshold = kwargs['upper_load_threshold'] + self.scale_interval_in_seconds = kwargs['scale_interval_in_seconds'] + self.use_only_primary_load = kwargs['use_only_primary_load'] -class BackupStorageDescription(Model): +class BackupStorageDescription(msrest.serialization.Model): """Describes the parameters for the backup storage. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: AzureBlobBackupStorageDescription, - FileShareBackupStorageDescription, DsmsAzureBlobBackupStorageDescription + sub-classes are: AzureBlobBackupStorageDescription, DsmsAzureBlobBackupStorageDescription, FileShareBackupStorageDescription, ManagedIdentityAzureBlobBackupStorageDescription. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str """ _validation = { @@ -3419,35 +3701,38 @@ class BackupStorageDescription(Model): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, } _subtype_map = { - 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription'} + 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'ManagedIdentityAzureBlobStore': 'ManagedIdentityAzureBlobBackupStorageDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupStorageDescription, self).__init__(**kwargs) + self.storage_kind = None # type: Optional[str] self.friendly_name = kwargs.get('friendly_name', None) - self.storage_kind = None class AzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Azure blob 
store used for storing and - enumerating backups. + """Describes the parameters for Azure blob store used for storing and enumerating backups. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str - :param connection_string: Required. The connection string to connect to - the Azure blob store. + :param connection_string: Required. The connection string to connect to the Azure blob store. :type connection_string: str - :param container_name: Required. The name of the container in the blob - store to store and enumerate backups from. + :param container_name: Required. The name of the container in the blob store to store and + enumerate backups from. 
:type container_name: str """ @@ -3458,34 +3743,37 @@ class AzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'connection_string': {'key': 'ConnectionString', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AzureBlobBackupStorageDescription, self).__init__(**kwargs) - self.connection_string = kwargs.get('connection_string', None) - self.container_name = kwargs.get('container_name', None) - self.storage_kind = 'AzureBlobStore' + self.storage_kind = 'AzureBlobStore' # type: str + self.connection_string = kwargs['connection_string'] + self.container_name = kwargs['container_name'] -class DiagnosticsSinkProperties(Model): +class DiagnosticsSinkProperties(msrest.serialization.Model): """Properties of a DiagnosticsSink. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AzureInternalMonitoringPipelineSinkDescription + sub-classes are: AzureInternalMonitoringPipelineSinkDescription. All required parameters must be populated in order to send to Azure. - :param name: Name of the sink. This value is referenced by - DiagnosticsReferenceDescription + :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values + include: "Invalid", "AzureInternalMonitoringPipeline". + :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind + :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. :type name: str :param description: A description of the sink. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -3493,20 +3781,23 @@ class DiagnosticsSinkProperties(Model): } _attribute_map = { + 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, - 'kind': {'key': 'kind', 'type': 'str'}, } _subtype_map = { 'kind': {'AzureInternalMonitoringPipeline': 'AzureInternalMonitoringPipelineSinkDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DiagnosticsSinkProperties, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) - self.kind = None class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): @@ -3514,24 +3805,23 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): All required parameters must be populated in order to send to Azure. - :param name: Name of the sink. This value is referenced by - DiagnosticsReferenceDescription + :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values + include: "Invalid", "AzureInternalMonitoringPipeline". + :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind + :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. :type name: str :param description: A description of the sink. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param account_name: Azure Internal monitoring pipeline account. :type account_name: str :param namespace: Azure Internal monitoring pipeline account namespace. :type namespace: str :param ma_config_url: Azure Internal monitoring agent configuration. :type ma_config_url: str - :param fluentd_config_url: Azure Internal monitoring agent fluentd - configuration. + :param fluentd_config_url: Azure Internal monitoring agent fluentd configuration. 
:type fluentd_config_url: str - :param auto_key_config_url: Azure Internal monitoring pipeline autokey - associated with the certificate. + :param auto_key_config_url: Azure Internal monitoring pipeline autokey associated with the + certificate. :type auto_key_config_url: str """ @@ -3540,9 +3830,9 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): } _attribute_map = { + 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, - 'kind': {'key': 'kind', 'type': 'str'}, 'account_name': {'key': 'accountName', 'type': 'str'}, 'namespace': {'key': 'namespace', 'type': 'str'}, 'ma_config_url': {'key': 'maConfigUrl', 'type': 'str'}, @@ -3550,53 +3840,49 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): 'auto_key_config_url': {'key': 'autoKeyConfigUrl', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(AzureInternalMonitoringPipelineSinkDescription, self).__init__(**kwargs) + self.kind = 'AzureInternalMonitoringPipeline' # type: str self.account_name = kwargs.get('account_name', None) self.namespace = kwargs.get('namespace', None) self.ma_config_url = kwargs.get('ma_config_url', None) self.fluentd_config_url = kwargs.get('fluentd_config_url', None) self.auto_key_config_url = kwargs.get('auto_key_config_url', None) - self.kind = 'AzureInternalMonitoringPipeline' -class BackupInfo(Model): +class BackupInfo(msrest.serialization.Model): """Represents a backup point which can be used to trigger a restore. :param backup_id: Unique backup ID . :type backup_id: str - :param backup_chain_id: Unique backup chain ID. All backups part of the - same chain has the same backup chain id. A backup chain is comprised of 1 - full backup and multiple incremental backups. + :param backup_chain_id: Unique backup chain ID. All backups part of the same chain has the same + backup chain id. 
A backup chain is comprised of 1 full backup and multiple incremental backups. :type backup_chain_id: str - :param application_name: Name of the Service Fabric application this - partition backup belongs to. + :param application_name: Name of the Service Fabric application this partition backup belongs + to. :type application_name: str - :param service_name: Name of the Service Fabric service this partition - backup belongs to. + :param service_name: Name of the Service Fabric service this partition backup belongs to. :type service_name: str - :param partition_information: Information about the partition to which - this backup belongs to - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param backup_location: Location of the backup, relative to the backup - store. + :param partition_information: Information about the partition to which this backup belongs to. + :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param backup_location: Location of the backup, relative to the backup store. :type backup_location: str - :param backup_type: Describes the type of backup, whether its full or - incremental. Possible values include: 'Invalid', 'Full', 'Incremental' + :param backup_type: Describes the type of backup, whether its full or incremental. Possible + values include: "Invalid", "Full", "Incremental". :type backup_type: str or ~azure.servicefabric.models.BackupType - :param epoch_of_last_backup_record: Epoch of the last record in this - backup. + :param epoch_of_last_backup_record: Epoch of the last record in this backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch :param lsn_of_last_backup_record: LSN of the last record in this backup. :type lsn_of_last_backup_record: str :param creation_time_utc: The date time when this backup was taken. - :type creation_time_utc: datetime - :param service_manifest_version: Manifest Version of the service this - partition backup belongs to. 
+ :type creation_time_utc: ~datetime.datetime + :param service_manifest_version: Manifest Version of the service this partition backup belongs + to. :type service_manifest_version: str - :param failure_error: Denotes the failure encountered in getting backup - point information. + :param failure_error: Denotes the failure encountered in getting backup point information. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -3615,7 +3901,10 @@ class BackupInfo(Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupInfo, self).__init__(**kwargs) self.backup_id = kwargs.get('backup_id', None) self.backup_chain_id = kwargs.get('backup_chain_id', None) @@ -3631,11 +3920,10 @@ def __init__(self, **kwargs): self.failure_error = kwargs.get('failure_error', None) -class BackupPartitionDescription(Model): +class BackupPartitionDescription(msrest.serialization.Model): """Describes the parameters for triggering partition's backup. - :param backup_storage: Specifies the details of the backup storage where - to save the backup. + :param backup_storage: Specifies the details of the backup storage where to save the backup. :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -3643,39 +3931,40 @@ class BackupPartitionDescription(Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupPartitionDescription, self).__init__(**kwargs) self.backup_storage = kwargs.get('backup_storage', None) -class BackupPolicyDescription(Model): +class BackupPolicyDescription(msrest.serialization.Model): """Describes a backup policy for configuring periodic backup. All required parameters must be populated in order to send to Azure. :param name: Required. The unique name identifying this backup policy. 
:type name: str - :param auto_restore_on_data_loss: Required. Specifies whether to trigger - restore automatically using the latest available backup in case the - partition experiences a data loss event. + :param auto_restore_on_data_loss: Required. Specifies whether to trigger restore automatically + using the latest available backup in case the partition experiences a data loss event. :type auto_restore_on_data_loss: bool - :param max_incremental_backups: Required. Defines the maximum number of - incremental backups to be taken between two full backups. This is just the - upper limit. A full backup may be taken before specified number of - incremental backups are completed in one of the following conditions - - The replica has never taken a full backup since it has become primary, - - Some of the log records since the last backup has been truncated, or - - Replica passed the MaxAccumulatedBackupLogSizeInMB limit. + :param max_incremental_backups: Required. Defines the maximum number of incremental backups to + be taken between two full backups. This is just the upper limit. A full backup may be taken + before specified number of incremental backups are completed in one of the following conditions + + + * The replica has never taken a full backup since it has become primary, + * Some of the log records since the last backup has been truncated, or + * Replica passed the MaxAccumulatedBackupLogSizeInMB limit. :type max_incremental_backups: int :param schedule: Required. Describes the backup schedule parameters. :type schedule: ~azure.servicefabric.models.BackupScheduleDescription - :param storage: Required. Describes the details of backup storage where to - store the periodic backups. + :param storage: Required. Describes the details of backup storage where to store the periodic + backups. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param retention_policy: Describes the policy to retain backups in - storage. 
- :type retention_policy: - ~azure.servicefabric.models.RetentionPolicyDescription + :param retention_policy: Describes the policy to retain backups in storage. + :type retention_policy: ~azure.servicefabric.models.RetentionPolicyDescription """ _validation = { @@ -3695,39 +3984,36 @@ class BackupPolicyDescription(Model): 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicyDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupPolicyDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.auto_restore_on_data_loss = kwargs.get('auto_restore_on_data_loss', None) - self.max_incremental_backups = kwargs.get('max_incremental_backups', None) - self.schedule = kwargs.get('schedule', None) - self.storage = kwargs.get('storage', None) + self.name = kwargs['name'] + self.auto_restore_on_data_loss = kwargs['auto_restore_on_data_loss'] + self.max_incremental_backups = kwargs['max_incremental_backups'] + self.schedule = kwargs['schedule'] + self.storage = kwargs['storage'] self.retention_policy = kwargs.get('retention_policy', None) -class BackupProgressInfo(Model): +class BackupProgressInfo(msrest.serialization.Model): """Describes the progress of a partition's backup. - :param backup_state: Represents the current state of the partition backup - operation. Possible values include: 'Invalid', 'Accepted', - 'BackupInProgress', 'Success', 'Failure', 'Timeout' + :param backup_state: Represents the current state of the partition backup operation. Possible + values include: "Invalid", "Accepted", "BackupInProgress", "Success", "Failure", "Timeout". :type backup_state: str or ~azure.servicefabric.models.BackupState - :param time_stamp_utc: TimeStamp in UTC when operation succeeded or - failed. - :type time_stamp_utc: datetime + :param time_stamp_utc: TimeStamp in UTC when operation succeeded or failed. 
+ :type time_stamp_utc: ~datetime.datetime :param backup_id: Unique ID of the newly created backup. :type backup_id: str - :param backup_location: Location, relative to the backup store, of the - newly created backup. + :param backup_location: Location, relative to the backup store, of the newly created backup. :type backup_location: str - :param epoch_of_last_backup_record: Specifies the epoch of the last record - included in backup. + :param epoch_of_last_backup_record: Specifies the epoch of the last record included in backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch - :param lsn_of_last_backup_record: The LSN of last record included in - backup. + :param lsn_of_last_backup_record: The LSN of last record included in backup. :type lsn_of_last_backup_record: str - :param failure_error: Denotes the failure encountered in performing backup - operation. + :param failure_error: Denotes the failure encountered in performing backup operation. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -3741,7 +4027,10 @@ class BackupProgressInfo(Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupProgressInfo, self).__init__(**kwargs) self.backup_state = kwargs.get('backup_state', None) self.time_stamp_utc = kwargs.get('time_stamp_utc', None) @@ -3752,17 +4041,18 @@ def __init__(self, **kwargs): self.failure_error = kwargs.get('failure_error', None) -class BackupScheduleDescription(Model): +class BackupScheduleDescription(msrest.serialization.Model): """Describes the backup schedule parameters. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FrequencyBasedBackupScheduleDescription, - TimeBasedBackupScheduleDescription + sub-classes are: FrequencyBasedBackupScheduleDescription, TimeBasedBackupScheduleDescription. 
All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. Constant filled by server. - :type schedule_kind: str + :param schedule_kind: Required. The kind of backup schedule, time based or frequency + based.Constant filled by server. Possible values include: "Invalid", "TimeBased", + "FrequencyBased". + :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind """ _validation = { @@ -3777,22 +4067,22 @@ class BackupScheduleDescription(Model): 'schedule_kind': {'FrequencyBased': 'FrequencyBasedBackupScheduleDescription', 'TimeBased': 'TimeBasedBackupScheduleDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = None + self.schedule_kind = None # type: Optional[str] -class BackupSuspensionInfo(Model): +class BackupSuspensionInfo(msrest.serialization.Model): """Describes the backup suspension details. - :param is_suspended: Indicates whether periodic backup is suspended at - this level or not. + :param is_suspended: Indicates whether periodic backup is suspended at this level or not. :type is_suspended: bool - :param suspension_inherited_from: Specifies the scope at which the backup - suspension was applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type suspension_inherited_from: str or - ~azure.servicefabric.models.BackupSuspensionScope + :param suspension_inherited_from: Specifies the scope at which the backup suspension was + applied. Possible values include: "Invalid", "Partition", "Service", "Application". 
+ :type suspension_inherited_from: str or ~azure.servicefabric.models.BackupSuspensionScope """ _attribute_map = { @@ -3800,22 +4090,27 @@ class BackupSuspensionInfo(Model): 'suspension_inherited_from': {'key': 'SuspensionInheritedFrom', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BackupSuspensionInfo, self).__init__(**kwargs) self.is_suspended = kwargs.get('is_suspended', None) self.suspension_inherited_from = kwargs.get('suspension_inherited_from', None) -class RetentionPolicyDescription(Model): +class RetentionPolicyDescription(msrest.serialization.Model): """Describes the retention policy configured. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BasicRetentionPolicyDescription + sub-classes are: BasicRetentionPolicyDescription. All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. Constant filled by server. - :type retention_policy_type: str + :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" + retention policy is supported.Constant filled by server. Possible values include: "Basic", + "Invalid". + :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType """ _validation = { @@ -3830,9 +4125,12 @@ class RetentionPolicyDescription(Model): 'retention_policy_type': {'Basic': 'BasicRetentionPolicyDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RetentionPolicyDescription, self).__init__(**kwargs) - self.retention_policy_type = None + self.retention_policy_type = None # type: Optional[str] class BasicRetentionPolicyDescription(RetentionPolicyDescription): @@ -3840,16 +4138,17 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. Constant filled by server. 
- :type retention_policy_type: str - :param retention_duration: Required. It is the minimum duration for which - a backup created, will remain stored in the storage and might get deleted - after that span of time. It should be specified in ISO8601 format. - :type retention_duration: timedelta - :param minimum_number_of_backups: It is the minimum number of backups to - be retained at any point of time. If specified with a non zero value, - backups will not be deleted even if the backups have gone past retention - duration and have number of backups less than or equal to it. + :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" + retention policy is supported.Constant filled by server. Possible values include: "Basic", + "Invalid". + :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType + :param retention_duration: Required. It is the minimum duration for which a backup created, + will remain stored in the storage and might get deleted after that span of time. It should be + specified in ISO8601 format. + :type retention_duration: ~datetime.timedelta + :param minimum_number_of_backups: It is the minimum number of backups to be retained at any + point of time. If specified with a non zero value, backups will not be deleted even if the + backups have gone past retention duration and have number of backups less than or equal to it. 
:type minimum_number_of_backups: int """ @@ -3865,24 +4164,28 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): 'minimum_number_of_backups': {'key': 'MinimumNumberOfBackups', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BasicRetentionPolicyDescription, self).__init__(**kwargs) - self.retention_duration = kwargs.get('retention_duration', None) + self.retention_policy_type = 'Basic' # type: str + self.retention_duration = kwargs['retention_duration'] self.minimum_number_of_backups = kwargs.get('minimum_number_of_backups', None) - self.retention_policy_type = 'Basic' -class PropertyValue(Model): +class PropertyValue(msrest.serialization.Model): """Describes a Service Fabric property value. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BinaryPropertyValue, Int64PropertyValue, - DoublePropertyValue, StringPropertyValue, GuidPropertyValue + sub-classes are: BinaryPropertyValue, DoublePropertyValue, GuidPropertyValue, Int64PropertyValue, StringPropertyValue. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". 
+ :type kind: str or ~azure.servicefabric.models.PropertyValueKind """ _validation = { @@ -3894,12 +4197,15 @@ class PropertyValue(Model): } _subtype_map = { - 'kind': {'Binary': 'BinaryPropertyValue', 'Int64': 'Int64PropertyValue', 'Double': 'DoublePropertyValue', 'String': 'StringPropertyValue', 'Guid': 'GuidPropertyValue'} + 'kind': {'Binary': 'BinaryPropertyValue', 'Double': 'DoublePropertyValue', 'Guid': 'GuidPropertyValue', 'Int64': 'Int64PropertyValue', 'String': 'StringPropertyValue'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyValue, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class BinaryPropertyValue(PropertyValue): @@ -3907,10 +4213,12 @@ class BinaryPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param data: Required. Array of bytes to be sent as an integer array. Each - element of array is a number between 0 and 255. + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param data: Required. Array of bytes to be sent as an integer array. Each element of array is + a number between 0 and 255. :type data: list[int] """ @@ -3924,25 +4232,26 @@ class BinaryPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': '[int]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(BinaryPropertyValue, self).__init__(**kwargs) - self.data = kwargs.get('data', None) - self.kind = 'Binary' + self.kind = 'Binary' # type: str + self.data = kwargs['data'] -class Chaos(Model): +class Chaos(msrest.serialization.Model): """Contains a description of Chaos. 
- :param chaos_parameters: If Chaos is running, these are the parameters - Chaos is running with. + :param chaos_parameters: If Chaos is running, these are the parameters Chaos is running with. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param status: Current status of the Chaos run. Possible values include: - 'Invalid', 'Running', 'Stopped' + :param status: Current status of the Chaos run. Possible values include: "Invalid", "Running", + "Stopped". :type status: str or ~azure.servicefabric.models.ChaosStatus - :param schedule_status: Current status of the schedule. Possible values - include: 'Invalid', 'Stopped', 'Active', 'Expired', 'Pending' - :type schedule_status: str or - ~azure.servicefabric.models.ChaosScheduleStatus + :param schedule_status: Current status of the schedule. Possible values include: "Invalid", + "Stopped", "Active", "Expired", "Pending". + :type schedule_status: str or ~azure.servicefabric.models.ChaosScheduleStatus """ _attribute_map = { @@ -3951,7 +4260,10 @@ class Chaos(Model): 'schedule_status': {'key': 'ScheduleStatus', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(Chaos, self).__init__(**kwargs) self.chaos_parameters = kwargs.get('chaos_parameters', None) self.status = kwargs.get('status', None) @@ -3963,25 +4275,44 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -3993,15 +4324,14 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): :type service_manifest_name: str :param code_package_name: Required. Code package name. :type code_package_name: str - :param service_package_activation_id: Required. Id of Service package - activation. + :param service_package_activation_id: Required. Id of Service package activation. 
:type service_package_activation_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4012,11 +4342,11 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4026,27 +4356,26 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosCodePackageRestartScheduledEvent, self).__init__(**kwargs) - self.fault_group_id = kwargs.get('fault_group_id', None) - self.fault_id = kwargs.get('fault_id', None) - self.node_name = kwargs.get('node_name', None) - self.service_manifest_name = kwargs.get('service_manifest_name', None) - self.code_package_name = kwargs.get('code_package_name', None) - self.service_package_activation_id = kwargs.get('service_package_activation_id', None) - self.kind = 'ChaosCodePackageRestartScheduled' + self.kind = 'ChaosCodePackageRestartScheduled' # type: str + self.fault_group_id = kwargs['fault_group_id'] + self.fault_id = kwargs['fault_id'] + self.node_name = kwargs['node_name'] + self.service_manifest_name = kwargs['service_manifest_name'] + self.code_package_name = kwargs['code_package_name'] + self.service_package_activation_id = 
kwargs['service_package_activation_id'] -class ChaosContext(Model): - """Describes a map, which is a collection of (string, string) type key-value - pairs. The map can be used to record information about - the Chaos run. There cannot be more than 100 such pairs and each string - (key or value) can be at most 4095 characters long. - This map is set by the starter of the Chaos run to optionally store the - context about the specific run. +class ChaosContext(msrest.serialization.Model): + """Describes a map, which is a collection of (string, string) type key-value pairs. The map can be used to record information about +the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be at most 4095 characters long. +This map is set by the starter of the Chaos run to optionally store the context about the specific run. - :param map: Describes a map that contains a collection of - ChaosContextMapItem's. + :param map: Describes a map that contains a collection of ChaosContextMapItem's. :type map: dict[str, str] """ @@ -4054,58 +4383,61 @@ class ChaosContext(Model): 'map': {'key': 'Map', 'type': '{str}'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosContext, self).__init__(**kwargs) self.map = kwargs.get('map', None) -class ChaosEvent(Model): +class ChaosEvent(msrest.serialization.Model): """Represents an event generated during a Chaos run. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, - StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, - WaitingChaosEvent + sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, WaitingChaosEvent. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. 
- :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, } _subtype_map = { 'kind': {'ExecutingFaults': 'ExecutingFaultsChaosEvent', 'Started': 'StartedChaosEvent', 'Stopped': 'StoppedChaosEvent', 'TestError': 'TestErrorChaosEvent', 'ValidationFailed': 'ValidationFailedChaosEvent', 'Waiting': 'WaitingChaosEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosEvent, self).__init__(**kwargs) - self.time_stamp_utc = kwargs.get('time_stamp_utc', None) - self.kind = None + self.kind = None # type: Optional[str] + self.time_stamp_utc = kwargs['time_stamp_utc'] -class ChaosEventsSegment(Model): - """Contains the list of Chaos events and the continuation token to get the - next segment. +class ChaosEventsSegment(msrest.serialization.Model): + """Contains the list of Chaos events and the continuation token to get the next segment. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param history: List of Chaos events that meet the user-supplied criteria. :type history: list[~azure.servicefabric.models.ChaosEventWrapper] @@ -4116,13 +4448,16 @@ class ChaosEventsSegment(Model): 'history': {'key': 'History', 'type': '[ChaosEventWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosEventsSegment, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.history = kwargs.get('history', None) -class ChaosEventWrapper(Model): +class ChaosEventWrapper(msrest.serialization.Model): """Wrapper object for Chaos event. :param chaos_event: Represents an event generated during a Chaos run. @@ -4133,7 +4468,10 @@ class ChaosEventWrapper(Model): 'chaos_event': {'key': 'ChaosEvent', 'type': 'ChaosEvent'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosEventWrapper, self).__init__(**kwargs) self.chaos_event = kwargs.get('chaos_event', None) @@ -4142,54 +4480,73 @@ class NodeEvent(FabricEvent): """Represents the base for all Node Events. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: NodeAbortedEvent, NodeAddedToClusterEvent, - NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, - NodeDownEvent, NodeNewHealthReportEvent, NodeHealthReportExpiredEvent, - NodeOpenSucceededEvent, NodeOpenFailedEvent, NodeRemovedFromClusterEvent, - NodeUpEvent, ChaosNodeRestartScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosNodeRestartScheduledEvent, NodeAbortedEvent, NodeAddedToClusterEvent, NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, NodeDownEvent, NodeHealthReportExpiredEvent, NodeNewHealthReportEvent, NodeOpenFailedEvent, NodeOpenSucceededEvent, NodeRemovedFromClusterEvent, NodeUpEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", 
"ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, } _subtype_map = { - 'kind': {'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent', 'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent'} + 'kind': {'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent', 'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeEvent, self).__init__(**kwargs) - self.node_name = 
kwargs.get('node_name', None) - self.kind = 'NodeEvent' + self.kind = 'NodeEvent' # type: str + self.node_name = kwargs['node_name'] class ChaosNodeRestartScheduledEvent(NodeEvent): @@ -4197,18 +4554,38 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", 
"ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. 
@@ -4220,9 +4597,9 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4230,85 +4607,79 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosNodeRestartScheduledEvent, self).__init__(**kwargs) - self.node_instance_id = kwargs.get('node_instance_id', None) - self.fault_group_id = kwargs.get('fault_group_id', None) - self.fault_id = kwargs.get('fault_id', None) - self.kind = 'ChaosNodeRestartScheduled' + self.kind = 'ChaosNodeRestartScheduled' # type: str + self.node_instance_id = kwargs['node_instance_id'] + self.fault_group_id = kwargs['fault_group_id'] + self.fault_id = kwargs['fault_id'] -class ChaosParameters(Model): +class ChaosParameters(msrest.serialization.Model): """Defines all the parameters to configure a Chaos run. - :param time_to_run_in_seconds: Total time (in seconds) for which Chaos - will run before automatically stopping. The maximum allowed value is - 4,294,967,295 (System.UInt32.MaxValue). Default value: "4294967295" . + :param time_to_run_in_seconds: Total time (in seconds) for which Chaos will run before + automatically stopping. 
The maximum allowed value is 4,294,967,295 (System.UInt32.MaxValue). :type time_to_run_in_seconds: str - :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of - time to wait for all cluster entities to become stable and healthy. Chaos - executes in iterations and at the start of each iteration it validates the - health of cluster entities. + :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of time to wait for all + cluster entities to become stable and healthy. Chaos executes in iterations and at the start of + each iteration it validates the health of cluster entities. During validation if a cluster entity is not stable and healthy within - MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation - failed event. Default value: 60 . + MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation failed event. :type max_cluster_stabilization_timeout_in_seconds: long - :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of - concurrent faults induced per iteration. - Chaos executes in iterations and two consecutive iterations are separated - by a validation phase. - The higher the concurrency, the more aggressive the injection of faults, - leading to inducing more complex series of states to uncover bugs. - The recommendation is to start with a value of 2 or 3 and to exercise - caution while moving up. Default value: 1 . + :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of concurrent faults + induced per iteration. + Chaos executes in iterations and two consecutive iterations are separated by a validation + phase. + The higher the concurrency, the more aggressive the injection of faults, leading to inducing + more complex series of states to uncover bugs. + The recommendation is to start with a value of 2 or 3 and to exercise caution while moving up. 
:type max_concurrent_faults: long - :param enable_move_replica_faults: Enables or disables the move primary - and move secondary faults. Default value: True . + :param enable_move_replica_faults: Enables or disables the move primary and move secondary + faults. :type enable_move_replica_faults: bool - :param wait_time_between_faults_in_seconds: Wait time (in seconds) between - consecutive faults within a single iteration. - The larger the value, the lower the overlapping between faults and the - simpler the sequence of state transitions that the cluster goes through. - The recommendation is to start with a value between 1 and 5 and exercise - caution while moving up. Default value: 20 . + :param wait_time_between_faults_in_seconds: Wait time (in seconds) between consecutive faults + within a single iteration. + The larger the value, the lower the overlapping between faults and the simpler the sequence of + state transitions that the cluster goes through. + The recommendation is to start with a value between 1 and 5 and exercise caution while moving + up. :type wait_time_between_faults_in_seconds: long - :param wait_time_between_iterations_in_seconds: Time-separation (in - seconds) between two consecutive iterations of Chaos. - The larger the value, the lower the fault injection rate. Default value: - 30 . + :param wait_time_between_iterations_in_seconds: Time-separation (in seconds) between two + consecutive iterations of Chaos. + The larger the value, the lower the fault injection rate. :type wait_time_between_iterations_in_seconds: long - :param cluster_health_policy: Passed-in cluster health policy is used to - validate health of the cluster in between Chaos iterations. If the cluster - health is in error or if an unexpected exception happens during fault - execution--to provide the cluster with some time to recuperate--Chaos will - wait for 30 minutes before the next health-check. 
- :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param context: Describes a map, which is a collection of (string, string) - type key-value pairs. The map can be used to record information about - the Chaos run. There cannot be more than 100 such pairs and each string - (key or value) can be at most 4095 characters long. - This map is set by the starter of the Chaos run to optionally store the - context about the specific run. + :param cluster_health_policy: Passed-in cluster health policy is used to validate health of the + cluster in between Chaos iterations. If the cluster health is in error or if an unexpected + exception happens during fault execution--to provide the cluster with some time to + recuperate--Chaos will wait for 30 minutes before the next health-check. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param context: Describes a map, which is a collection of (string, string) type key-value + pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be + at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the context about the + specific run. :type context: ~azure.servicefabric.models.ChaosContext - :param chaos_target_filter: List of cluster entities to target for Chaos - faults. - This filter can be used to target Chaos faults only to certain node types - or only to certain application instances. If ChaosTargetFilter is not - used, Chaos faults all cluster entities. - If ChaosTargetFilter is used, Chaos faults only the entities that meet the - ChaosTargetFilter specification. + :param chaos_target_filter: List of cluster entities to target for Chaos faults. + This filter can be used to target Chaos faults only to certain node types or only to certain + application instances. If ChaosTargetFilter is not used, Chaos faults all cluster entities. 
+ If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter + specification. :type chaos_target_filter: ~azure.servicefabric.models.ChaosTargetFilter """ @@ -4331,7 +4702,10 @@ class ChaosParameters(Model): 'chaos_target_filter': {'key': 'ChaosTargetFilter', 'type': 'ChaosTargetFilter'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosParameters, self).__init__(**kwargs) self.time_to_run_in_seconds = kwargs.get('time_to_run_in_seconds', "4294967295") self.max_cluster_stabilization_timeout_in_seconds = kwargs.get('max_cluster_stabilization_timeout_in_seconds', 60) @@ -4344,16 +4718,15 @@ def __init__(self, **kwargs): self.chaos_target_filter = kwargs.get('chaos_target_filter', None) -class ChaosParametersDictionaryItem(Model): +class ChaosParametersDictionaryItem(msrest.serialization.Model): """Defines an item in ChaosParametersDictionary of the Chaos Schedule. All required parameters must be populated in order to send to Azure. - :param key: Required. The key identifying the Chaos Parameter in the - dictionary. This key is referenced by Chaos Schedule Jobs. + :param key: Required. The key identifying the Chaos Parameter in the dictionary. This key is + referenced by Chaos Schedule Jobs. :type key: str - :param value: Required. Defines all the parameters to configure a Chaos - run. + :param value: Required. Defines all the parameters to configure a Chaos run. :type value: ~azure.servicefabric.models.ChaosParameters """ @@ -4367,67 +4740,89 @@ class ChaosParametersDictionaryItem(Model): 'value': {'key': 'Value', 'type': 'ChaosParameters'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosParametersDictionaryItem, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) + self.key = kwargs['key'] + self.value = kwargs['value'] class PartitionEvent(FabricEvent): """Represents the base for all Partition Events. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionAnalysisEvent, PartitionNewHealthReportEvent, - PartitionHealthReportExpiredEvent, PartitionReconfiguredEvent, - ChaosPartitionSecondaryMoveScheduledEvent, - ChaosPartitionPrimaryMoveScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosPartitionPrimaryMoveScheduledEvent, ChaosPartitionSecondaryMoveScheduledEvent, PartitionAnalysisEvent, PartitionHealthReportExpiredEvent, PartitionNewHealthReportEvent, PartitionReconfiguredEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", 
+ "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. 
:type partition_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { - 'kind': {'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent'} + 'kind': {'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionEvent, self).__init__(**kwargs) - self.partition_id = kwargs.get('partition_id', None) - self.kind = 'PartitionEvent' + self.kind = 'PartitionEvent' # type: str + self.partition_id = kwargs['partition_id'] class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): @@ -4435,23 +4830,42 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. 
- :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
+ :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -4466,9 +4880,9 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4478,11 +4892,11 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4491,14 +4905,17 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosPartitionPrimaryMoveScheduledEvent, self).__init__(**kwargs) - self.fault_group_id = kwargs.get('fault_group_id', None) - self.fault_id = kwargs.get('fault_id', None) - self.service_name = kwargs.get('service_name', None) - self.node_to = kwargs.get('node_to', None) - self.forced_move = kwargs.get('forced_move', None) - self.kind = 'ChaosPartitionPrimaryMoveScheduled' + self.kind = 'ChaosPartitionPrimaryMoveScheduled' # type: str + self.fault_group_id = kwargs['fault_group_id'] + self.fault_id = kwargs['fault_id'] + self.service_name = kwargs['service_name'] + self.node_to = kwargs['node_to'] + self.forced_move = kwargs['forced_move'] class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): @@ -4506,23 +4923,42 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): All 
required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", 
"ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -4539,9 +4975,9 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4552,11 +4988,11 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4566,84 +5002,103 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosPartitionSecondaryMoveScheduledEvent, self).__init__(**kwargs) - self.fault_group_id = kwargs.get('fault_group_id', None) - self.fault_id = kwargs.get('fault_id', None) - self.service_name = kwargs.get('service_name', None) - self.source_node = kwargs.get('source_node', None) - self.destination_node = kwargs.get('destination_node', None) - self.forced_move = kwargs.get('forced_move', None) - self.kind = 'ChaosPartitionSecondaryMoveScheduled' + self.kind = 'ChaosPartitionSecondaryMoveScheduled' # type: str + self.fault_group_id = kwargs['fault_group_id'] + self.fault_id = kwargs['fault_id'] + self.service_name = kwargs['service_name'] + self.source_node = kwargs['source_node'] + self.destination_node = kwargs['destination_node'] + self.forced_move = kwargs['forced_move'] class 
ReplicaEvent(FabricEvent): """Represents the base for all Replica Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulReplicaNewHealthReportEvent, - StatefulReplicaHealthReportExpiredEvent, - StatelessReplicaNewHealthReportEvent, - StatelessReplicaHealthReportExpiredEvent, - ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent, StatefulReplicaHealthReportExpiredEvent, StatefulReplicaNewHealthReportEvent, StatelessReplicaHealthReportExpiredEvent, StatelessReplicaNewHealthReportEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", 
"ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. 
If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a replica id. 
:type replica_id: long """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, } _subtype_map = { - 'kind': {'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent'} + 'kind': {'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaEvent, self).__init__(**kwargs) - self.partition_id = kwargs.get('partition_id', None) - self.replica_id = kwargs.get('replica_id', None) - self.kind = 'ReplicaEvent' + self.kind = 'ReplicaEvent' # type: str + self.partition_id = kwargs['partition_id'] + self.replica_id 
= kwargs['replica_id'] class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): @@ -4651,31 +5106,48 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", 
"ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. 
+ :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -4686,9 +5158,9 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4697,11 +5169,11 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -4709,12 +5181,15 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosReplicaRemovalScheduledEvent, self).__init__(**kwargs) - self.fault_group_id = kwargs.get('fault_group_id', None) - self.fault_id = kwargs.get('fault_id', None) - self.service_uri = kwargs.get('service_uri', None) - self.kind = 'ChaosReplicaRemovalScheduled' + self.kind = 
'ChaosReplicaRemovalScheduled' # type: str + self.fault_group_id = kwargs['fault_group_id'] + self.fault_id = kwargs['fault_id'] + self.service_uri = kwargs['service_uri'] class ChaosReplicaRestartScheduledEvent(ReplicaEvent): @@ -4722,31 +5197,48 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. 
If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -4757,9 +5249,9 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4768,11 +5260,11 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -4780,29 +5272,29 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosReplicaRestartScheduledEvent, self).__init__(**kwargs) 
- self.fault_group_id = kwargs.get('fault_group_id', None) - self.fault_id = kwargs.get('fault_id', None) - self.service_uri = kwargs.get('service_uri', None) - self.kind = 'ChaosReplicaRestartScheduled' + self.kind = 'ChaosReplicaRestartScheduled' # type: str + self.fault_group_id = kwargs['fault_group_id'] + self.fault_id = kwargs['fault_id'] + self.service_uri = kwargs['service_uri'] -class ChaosSchedule(Model): +class ChaosSchedule(msrest.serialization.Model): """Defines the schedule used by Chaos. :param start_date: The date and time Chaos will start using this schedule. - Default value: "1601-01-01T00:00:00Z" . - :type start_date: datetime - :param expiry_date: The date and time Chaos will continue to use this - schedule until. Default value: "9999-12-31T23:59:59.999Z" . - :type expiry_date: datetime - :param chaos_parameters_dictionary: A mapping of string names to Chaos - Parameters to be referenced by Chaos Schedule Jobs. + :type start_date: ~datetime.datetime + :param expiry_date: The date and time Chaos will continue to use this schedule until. + :type expiry_date: ~datetime.datetime + :param chaos_parameters_dictionary: A mapping of string names to Chaos Parameters to be + referenced by Chaos Schedule Jobs. :type chaos_parameters_dictionary: list[~azure.servicefabric.models.ChaosParametersDictionaryItem] - :param jobs: A list of all Chaos Schedule Jobs that will be automated by - the schedule. + :param jobs: A list of all Chaos Schedule Jobs that will be automated by the schedule. 
:type jobs: list[~azure.servicefabric.models.ChaosScheduleJob] """ @@ -4813,7 +5305,10 @@ class ChaosSchedule(Model): 'jobs': {'key': 'Jobs', 'type': '[ChaosScheduleJob]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosSchedule, self).__init__(**kwargs) self.start_date = kwargs.get('start_date', "1601-01-01T00:00:00Z") self.expiry_date = kwargs.get('expiry_date', "9999-12-31T23:59:59.999Z") @@ -4821,9 +5316,8 @@ def __init__(self, **kwargs): self.jobs = kwargs.get('jobs', None) -class ChaosScheduleDescription(Model): - """Defines the Chaos Schedule used by Chaos and the version of the Chaos - Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. +class ChaosScheduleDescription(msrest.serialization.Model): + """Defines the Chaos Schedule used by Chaos and the version of the Chaos Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. :param version: The version number of the Schedule. :type version: int @@ -4840,24 +5334,24 @@ class ChaosScheduleDescription(Model): 'schedule': {'key': 'Schedule', 'type': 'ChaosSchedule'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosScheduleDescription, self).__init__(**kwargs) self.version = kwargs.get('version', None) self.schedule = kwargs.get('schedule', None) -class ChaosScheduleJob(Model): - """Defines a repetition rule and parameters of Chaos to be used with the Chaos - Schedule. +class ChaosScheduleJob(msrest.serialization.Model): + """Defines a repetition rule and parameters of Chaos to be used with the Chaos Schedule. - :param chaos_parameters: A reference to which Chaos Parameters of the - Chaos Schedule to use. + :param chaos_parameters: A reference to which Chaos Parameters of the Chaos Schedule to use. :type chaos_parameters: str - :param days: Defines the days of the week that a Chaos Schedule Job will - run for. + :param days: Defines the days of the week that a Chaos Schedule Job will run for. 
:type days: ~azure.servicefabric.models.ChaosScheduleJobActiveDaysOfWeek - :param times: A list of Time Ranges that specify when during active days - that this job will run. The times are interpreted as UTC. + :param times: A list of Time Ranges that specify when during active days that this job will + run. The times are interpreted as UTC. :type times: list[~azure.servicefabric.models.TimeRange] """ @@ -4867,36 +5361,32 @@ class ChaosScheduleJob(Model): 'times': {'key': 'Times', 'type': '[TimeRange]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosScheduleJob, self).__init__(**kwargs) self.chaos_parameters = kwargs.get('chaos_parameters', None) self.days = kwargs.get('days', None) self.times = kwargs.get('times', None) -class ChaosScheduleJobActiveDaysOfWeek(Model): +class ChaosScheduleJobActiveDaysOfWeek(msrest.serialization.Model): """Defines the days of the week that a Chaos Schedule Job will run for. :param sunday: Indicates if the Chaos Schedule Job will run on Sunday. - Default value: False . :type sunday: bool :param monday: Indicates if the Chaos Schedule Job will run on Monday. - Default value: False . :type monday: bool :param tuesday: Indicates if the Chaos Schedule Job will run on Tuesday. - Default value: False . :type tuesday: bool - :param wednesday: Indicates if the Chaos Schedule Job will run on - Wednesday. Default value: False . + :param wednesday: Indicates if the Chaos Schedule Job will run on Wednesday. :type wednesday: bool :param thursday: Indicates if the Chaos Schedule Job will run on Thursday. - Default value: False . :type thursday: bool :param friday: Indicates if the Chaos Schedule Job will run on Friday. - Default value: False . :type friday: bool :param saturday: Indicates if the Chaos Schedule Job will run on Saturday. - Default value: False . 
:type saturday: bool """ @@ -4910,7 +5400,10 @@ class ChaosScheduleJobActiveDaysOfWeek(Model): 'saturday': {'key': 'Saturday', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosScheduleJobActiveDaysOfWeek, self).__init__(**kwargs) self.sunday = kwargs.get('sunday', False) self.monday = kwargs.get('monday', False) @@ -4925,49 +5418,68 @@ class ClusterEvent(FabricEvent): """Represents the base for all Cluster Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ClusterNewHealthReportEvent, - ClusterHealthReportExpiredEvent, ClusterUpgradeCompletedEvent, - ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, - ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent, - ChaosStoppedEvent, ChaosStartedEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosStartedEvent, ChaosStoppedEvent, ClusterHealthReportExpiredEvent, ClusterNewHealthReportEvent, ClusterUpgradeCompletedEvent, ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ChaosStarted': 'ChaosStartedEvent'} + 'kind': {'ChaosStarted': 'ChaosStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 
'ClusterUpgradeStartedEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterEvent, self).__init__(**kwargs) - self.kind = 'ClusterEvent' + self.kind = 'ClusterEvent' # type: str class ChaosStartedEvent(ClusterEvent): @@ -4975,34 +5487,51 @@ class ChaosStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param max_concurrent_faults: Required. Maximum number of concurrent - faults. + :param max_concurrent_faults: Required. Maximum number of concurrent faults. :type max_concurrent_faults: long :param time_to_run_in_seconds: Required. Time to run in seconds. :type time_to_run_in_seconds: float - :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum - timeout for cluster stabilization in seconds. + :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum timeout for cluster + stabilization in seconds. :type max_cluster_stabilization_timeout_in_seconds: float - :param wait_time_between_iterations_in_seconds: Required. Wait time - between iterations in seconds. + :param wait_time_between_iterations_in_seconds: Required. Wait time between iterations in + seconds. :type wait_time_between_iterations_in_seconds: float - :param wait_time_between_faults_in_seconds: Required. Wait time between - faults in seconds. + :param wait_time_between_faults_in_seconds: Required. 
Wait time between faults in seconds. :type wait_time_between_faults_in_seconds: float - :param move_replica_fault_enabled: Required. Indicates MoveReplica fault - is enabled. + :param move_replica_fault_enabled: Required. Indicates MoveReplica fault is enabled. :type move_replica_fault_enabled: bool :param included_node_type_list: Required. List of included Node types. :type included_node_type_list: str @@ -5015,9 +5544,9 @@ class ChaosStartedEvent(ClusterEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'max_concurrent_faults': {'required': True}, 'time_to_run_in_seconds': {'required': True}, 'max_cluster_stabilization_timeout_in_seconds': {'required': True}, @@ -5031,11 +5560,11 @@ class ChaosStartedEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'float'}, 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'float'}, @@ -5048,19 +5577,22 @@ class ChaosStartedEvent(ClusterEvent): 'chaos_context': {'key': 'ChaosContext', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosStartedEvent, self).__init__(**kwargs) - self.max_concurrent_faults = kwargs.get('max_concurrent_faults', None) - self.time_to_run_in_seconds = kwargs.get('time_to_run_in_seconds', None) - self.max_cluster_stabilization_timeout_in_seconds = kwargs.get('max_cluster_stabilization_timeout_in_seconds', None) - 
self.wait_time_between_iterations_in_seconds = kwargs.get('wait_time_between_iterations_in_seconds', None) - self.wait_time_between_faults_in_seconds = kwargs.get('wait_time_between_faults_in_seconds', None) - self.move_replica_fault_enabled = kwargs.get('move_replica_fault_enabled', None) - self.included_node_type_list = kwargs.get('included_node_type_list', None) - self.included_application_list = kwargs.get('included_application_list', None) - self.cluster_health_policy = kwargs.get('cluster_health_policy', None) - self.chaos_context = kwargs.get('chaos_context', None) - self.kind = 'ChaosStarted' + self.kind = 'ChaosStarted' # type: str + self.max_concurrent_faults = kwargs['max_concurrent_faults'] + self.time_to_run_in_seconds = kwargs['time_to_run_in_seconds'] + self.max_cluster_stabilization_timeout_in_seconds = kwargs['max_cluster_stabilization_timeout_in_seconds'] + self.wait_time_between_iterations_in_seconds = kwargs['wait_time_between_iterations_in_seconds'] + self.wait_time_between_faults_in_seconds = kwargs['wait_time_between_faults_in_seconds'] + self.move_replica_fault_enabled = kwargs['move_replica_fault_enabled'] + self.included_node_type_list = kwargs['included_node_type_list'] + self.included_application_list = kwargs['included_application_list'] + self.cluster_health_policy = kwargs['cluster_health_policy'] + self.chaos_context = kwargs['chaos_context'] class ChaosStoppedEvent(ClusterEvent): @@ -5068,96 +5600,100 @@ class ChaosStoppedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param reason: Required. Describes reason. :type reason: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'reason': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosStoppedEvent, self).__init__(**kwargs) - self.reason = kwargs.get('reason', None) - self.kind = 'ChaosStopped' - - -class ChaosTargetFilter(Model): - """Defines all filters for targeted Chaos faults, for example, faulting only - certain node types or faulting only certain applications. - If ChaosTargetFilter is not used, Chaos faults all cluster entities. If - ChaosTargetFilter is used, Chaos faults only the entities that meet the - ChaosTargetFilter - specification. NodeTypeInclusionList and ApplicationInclusionList allow a - union semantics only. It is not possible to specify an intersection - of NodeTypeInclusionList and ApplicationInclusionList. For example, it is - not possible to specify "fault this application only when it is on that - node type." 
- Once an entity is included in either NodeTypeInclusionList or - ApplicationInclusionList, that entity cannot be excluded using - ChaosTargetFilter. Even if - applicationX does not appear in ApplicationInclusionList, in some Chaos - iteration applicationX can be faulted because it happens to be on a node of - nodeTypeY that is included - in NodeTypeInclusionList. If both NodeTypeInclusionList and - ApplicationInclusionList are null or empty, an ArgumentException is thrown. - - :param node_type_inclusion_list: A list of node types to include in Chaos - faults. - All types of faults (restart node, restart code package, remove replica, - restart replica, move primary, and move secondary) are enabled for the - nodes of these node types. - If a node type (say NodeTypeX) does not appear in the - NodeTypeInclusionList, then node level faults (like NodeRestart) will - never be enabled for the nodes of - NodeTypeX, but code package and replica faults can still be enabled for - NodeTypeX if an application in the ApplicationInclusionList. + self.kind = 'ChaosStopped' # type: str + self.reason = kwargs['reason'] + + +class ChaosTargetFilter(msrest.serialization.Model): + """Defines all filters for targeted Chaos faults, for example, faulting only certain node types or faulting only certain applications. +If ChaosTargetFilter is not used, Chaos faults all cluster entities. If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter +specification. NodeTypeInclusionList and ApplicationInclusionList allow a union semantics only. It is not possible to specify an intersection +of NodeTypeInclusionList and ApplicationInclusionList. For example, it is not possible to specify "fault this application only when it is on that node type." +Once an entity is included in either NodeTypeInclusionList or ApplicationInclusionList, that entity cannot be excluded using ChaosTargetFilter. 
Even if +applicationX does not appear in ApplicationInclusionList, in some Chaos iteration applicationX can be faulted because it happens to be on a node of nodeTypeY that is included +in NodeTypeInclusionList. If both NodeTypeInclusionList and ApplicationInclusionList are null or empty, an ArgumentException is thrown. + + :param node_type_inclusion_list: A list of node types to include in Chaos faults. + All types of faults (restart node, restart code package, remove replica, restart replica, move + primary, and move secondary) are enabled for the nodes of these node types. + If a node type (say NodeTypeX) does not appear in the NodeTypeInclusionList, then node level + faults (like NodeRestart) will never be enabled for the nodes of + NodeTypeX, but code package and replica faults can still be enabled for NodeTypeX if an + application in the ApplicationInclusionList. happens to reside on a node of NodeTypeX. - At most 100 node type names can be included in this list, to increase this - number, a config upgrade is required for - MaxNumberOfNodeTypesInChaosEntityFilter configuration. + At most 100 node type names can be included in this list, to increase this number, a config + upgrade is required for MaxNumberOfNodeTypesInChaosEntityFilter configuration. :type node_type_inclusion_list: list[str] - :param application_inclusion_list: A list of application URIs to include - in Chaos faults. - All replicas belonging to services of these applications are amenable to - replica faults (restart replica, remove replica, move primary, and move - secondary) by Chaos. - Chaos may restart a code package only if the code package hosts replicas - of these applications only. - If an application does not appear in this list, it can still be faulted in - some Chaos iteration if the application ends up on a node of a node type - that is included in NodeTypeInclusionList. 
- However, if applicationX is tied to nodeTypeY through placement - constraints and applicationX is absent from ApplicationInclusionList and - nodeTypeY is absent from NodeTypeInclusionList, then applicationX will - never be faulted. - At most 1000 application names can be included in this list, to increase - this number, a config upgrade is required for - MaxNumberOfApplicationsInChaosEntityFilter configuration. + :param application_inclusion_list: A list of application URIs to include in Chaos faults. + All replicas belonging to services of these applications are amenable to replica faults + (restart replica, remove replica, move primary, and move secondary) by Chaos. + Chaos may restart a code package only if the code package hosts replicas of these applications + only. + If an application does not appear in this list, it can still be faulted in some Chaos + iteration if the application ends up on a node of a node type that is included in + NodeTypeInclusionList. + However, if applicationX is tied to nodeTypeY through placement constraints and applicationX + is absent from ApplicationInclusionList and nodeTypeY is absent from NodeTypeInclusionList, + then applicationX will never be faulted. + At most 1000 application names can be included in this list, to increase this number, a config + upgrade is required for MaxNumberOfApplicationsInChaosEntityFilter configuration. 
:type application_inclusion_list: list[str] """ @@ -5166,170 +5702,180 @@ class ChaosTargetFilter(Model): 'application_inclusion_list': {'key': 'ApplicationInclusionList', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ChaosTargetFilter, self).__init__(**kwargs) self.node_type_inclusion_list = kwargs.get('node_type_inclusion_list', None) self.application_inclusion_list = kwargs.get('application_inclusion_list', None) -class PropertyBatchOperation(Model): - """Represents the base type for property operations that can be put into a - batch and submitted. +class PropertyBatchOperation(msrest.serialization.Model): + """Represents the base type for property operations that can be put into a batch and submitted. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CheckExistsPropertyBatchOperation, - CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, - DeletePropertyBatchOperation, GetPropertyBatchOperation, - PutPropertyBatchOperation + sub-classes are: CheckExistsPropertyBatchOperation, CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, DeletePropertyBatchOperation, GetPropertyBatchOperation, PutPropertyBatchOperation. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, } _subtype_map = { 'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyBatchOperation, self).__init__(**kwargs) - self.property_name = kwargs.get('property_name', None) - self.kind = None + self.kind = None # type: Optional[str] + self.property_name = kwargs['property_name'] class CheckExistsPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the Boolean existence of - a property with the Exists argument. - The PropertyBatchOperation operation fails if the property's existence is - not equal to the Exists argument. - The CheckExistsPropertyBatchOperation is generally used as a precondition - for the write operations in the batch. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the Boolean existence of a property with the Exists argument. +The PropertyBatchOperation operation fails if the property's existence is not equal to the Exists argument. +The CheckExistsPropertyBatchOperation is generally used as a precondition for the write operations in the batch. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. 
All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str - :param exists: Required. Whether or not the property should exist for the - operation to pass. + :param exists: Required. Whether or not the property should exist for the operation to pass. :type exists: bool """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'exists': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'exists': {'key': 'Exists', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CheckExistsPropertyBatchOperation, self).__init__(**kwargs) - self.exists = kwargs.get('exists', None) - self.kind = 'CheckExists' + self.kind = 'CheckExists' # type: str + self.exists = kwargs['exists'] class CheckSequencePropertyBatchOperation(PropertyBatchOperation): - """Compares the Sequence Number of a property with the SequenceNumber - argument. - A property's sequence number can be thought of as that property's version. - Every time the property is modified, its sequence number is increased. - The sequence number can be found in a property's metadata. - The comparison fails if the sequence numbers are not equal. 
- CheckSequencePropertyBatchOperation is generally used as a precondition for - the write operations in the batch. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Compares the Sequence Number of a property with the SequenceNumber argument. +A property's sequence number can be thought of as that property's version. +Every time the property is modified, its sequence number is increased. +The sequence number can be found in a property's metadata. +The comparison fails if the sequence numbers are not equal. +CheckSequencePropertyBatchOperation is generally used as a precondition for the write operations in the batch. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str :param sequence_number: Required. The expected sequence number. 
:type sequence_number: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'sequence_number': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CheckSequencePropertyBatchOperation, self).__init__(**kwargs) - self.sequence_number = kwargs.get('sequence_number', None) - self.kind = 'CheckSequence' + self.kind = 'CheckSequence' # type: str + self.sequence_number = kwargs['sequence_number'] class CheckValuePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the value of the property - with the expected value. - The CheckValuePropertyBatchOperation is generally used as a precondition - for the write operations in the batch. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the value of the property with the expected value. +The CheckValuePropertyBatchOperation is generally used as a precondition for the write operations in the batch. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". 
+ :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str :param value: Required. The expected property value. :type value: ~azure.servicefabric.models.PropertyValue """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CheckValuePropertyBatchOperation, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.kind = 'CheckValue' + self.kind = 'CheckValue' # type: str + self.value = kwargs['value'] -class ClusterConfiguration(Model): +class ClusterConfiguration(msrest.serialization.Model): """Information about the standalone cluster configuration. - :param cluster_configuration: The contents of the cluster configuration - file. + :param cluster_configuration: The contents of the cluster configuration file. :type cluster_configuration: str """ @@ -5337,62 +5883,54 @@ class ClusterConfiguration(Model): 'cluster_configuration': {'key': 'ClusterConfiguration', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterConfiguration, self).__init__(**kwargs) self.cluster_configuration = kwargs.get('cluster_configuration', None) -class ClusterConfigurationUpgradeDescription(Model): +class ClusterConfigurationUpgradeDescription(msrest.serialization.Model): """Describes the parameters for a standalone cluster configuration upgrade. All required parameters must be populated in order to send to Azure. 
- :param cluster_config: Required. The cluster configuration as a JSON - string. For example, [this - file](https://github.com/Azure-Samples/service-fabric-dotnet-standalone-cluster-configuration/blob/master/Samples/ClusterConfig.Unsecure.DevCluster.json) - contains JSON describing the [nodes and other properties of the - cluster](https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-manifest). + :param cluster_config: Required. The cluster configuration as a JSON string. For example, `this + file + `_ + contains JSON describing the `nodes and other properties of the cluster + `_. :type cluster_config: str - :param health_check_retry_timeout: The length of time between attempts to - perform health checks if the application or cluster is not healthy. - Default value: "PT0H0M0S" . - :type health_check_retry_timeout: timedelta - :param health_check_wait_duration_in_seconds: The length of time to wait - after completing an upgrade domain before starting the health checks - process. Default value: "PT0H0M0S" . - :type health_check_wait_duration_in_seconds: timedelta - :param health_check_stable_duration_in_seconds: The length of time that - the application or cluster must remain healthy before the upgrade proceeds - to the next upgrade domain. Default value: "PT0H0M0S" . - :type health_check_stable_duration_in_seconds: timedelta - :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade - domain. Default value: "PT0H0M0S" . - :type upgrade_domain_timeout_in_seconds: timedelta - :param upgrade_timeout_in_seconds: The upgrade timeout. Default value: - "PT0H0M0S" . - :type upgrade_timeout_in_seconds: timedelta - :param max_percent_unhealthy_applications: The maximum allowed percentage - of unhealthy applications during the upgrade. Allowed values are integer - values from zero to 100. Default value: 0 . 
+ :param health_check_retry_timeout: The length of time between attempts to perform health checks + if the application or cluster is not healthy. + :type health_check_retry_timeout: ~datetime.timedelta + :param health_check_wait_duration_in_seconds: The length of time to wait after completing an + upgrade domain before starting the health checks process. + :type health_check_wait_duration_in_seconds: ~datetime.timedelta + :param health_check_stable_duration_in_seconds: The length of time that the application or + cluster must remain healthy before the upgrade proceeds to the next upgrade domain. + :type health_check_stable_duration_in_seconds: ~datetime.timedelta + :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade domain. + :type upgrade_domain_timeout_in_seconds: ~datetime.timedelta + :param upgrade_timeout_in_seconds: The upgrade timeout. + :type upgrade_timeout_in_seconds: ~datetime.timedelta + :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy + applications during the upgrade. Allowed values are integer values from zero to 100. :type max_percent_unhealthy_applications: int - :param max_percent_unhealthy_nodes: The maximum allowed percentage of - unhealthy nodes during the upgrade. Allowed values are integer values from - zero to 100. Default value: 0 . + :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes during + the upgrade. Allowed values are integer values from zero to 100. :type max_percent_unhealthy_nodes: int - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage - of delta health degradation during the upgrade. Allowed values are integer - values from zero to 100. Default value: 0 . + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of delta health + degradation during the upgrade. Allowed values are integer values from zero to 100. 
:type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum - allowed percentage of upgrade domain delta health degradation during the - upgrade. Allowed values are integer values from zero to 100. Default - value: 0 . + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of + upgrade domain delta health degradation during the upgrade. Allowed values are integer values + from zero to 100. :type max_percent_upgrade_domain_delta_unhealthy_nodes: int - :param application_health_policies: Defines the application health policy - map used to evaluate the health of an application or one of its children - entities. - :type application_health_policies: - ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policies: Defines the application health policy map used to evaluate + the health of an application or one of its children entities. + :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies """ _validation = { @@ -5413,9 +5951,12 @@ class ClusterConfigurationUpgradeDescription(Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterConfigurationUpgradeDescription, self).__init__(**kwargs) - self.cluster_config = kwargs.get('cluster_config', None) + self.cluster_config = kwargs['cluster_config'] self.health_check_retry_timeout = kwargs.get('health_check_retry_timeout', "PT0H0M0S") self.health_check_wait_duration_in_seconds = kwargs.get('health_check_wait_duration_in_seconds', "PT0H0M0S") self.health_check_stable_duration_in_seconds = kwargs.get('health_check_stable_duration_in_seconds', "PT0H0M0S") @@ -5428,13 +5969,12 @@ def __init__(self, **kwargs): self.application_health_policies = kwargs.get('application_health_policies', None) -class 
ClusterConfigurationUpgradeStatusInfo(Model): +class ClusterConfigurationUpgradeStatusInfo(msrest.serialization.Model): """Information about a standalone cluster configuration upgrade status. - :param upgrade_state: The state of the upgrade domain. Possible values - include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', - 'RollingForwardPending', 'RollingForwardInProgress', - 'RollingForwardCompleted', 'Failed' + :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", + "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", + "RollingForwardInProgress", "RollingForwardCompleted", "Failed". :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState :param progress_status: The cluster manifest version. :type progress_status: int @@ -5451,7 +5991,10 @@ class ClusterConfigurationUpgradeStatusInfo(Model): 'details': {'key': 'Details', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterConfigurationUpgradeStatusInfo, self).__init__(**kwargs) self.upgrade_state = kwargs.get('upgrade_state', None) self.progress_status = kwargs.get('progress_status', None) @@ -5461,35 +6004,28 @@ def __init__(self, **kwargs): class ClusterHealth(EntityHealth): """Represents the health of the cluster. - Contains the cluster aggregated health state, the cluster application and - node health states as well as the health events and the unhealthy - evaluations. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState +Contains the cluster aggregated health state, the cluster application and node health states as well as the health events and the unhealthy evaluations. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param node_health_states: Cluster node health states as found in the - health store. - :type node_health_states: - list[~azure.servicefabric.models.NodeHealthState] - :param application_health_states: Cluster application health states as - found in the health store. 
- :type application_health_states: - list[~azure.servicefabric.models.ApplicationHealthState] + :param node_health_states: Cluster node health states as found in the health store. + :type node_health_states: list[~azure.servicefabric.models.NodeHealthState] + :param application_health_states: Cluster application health states as found in the health + store. + :type application_health_states: list[~azure.servicefabric.models.ApplicationHealthState] """ _attribute_map = { @@ -5501,33 +6037,31 @@ class ClusterHealth(EntityHealth): 'application_health_states': {'key': 'ApplicationHealthStates', 'type': '[ApplicationHealthState]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterHealth, self).__init__(**kwargs) self.node_health_states = kwargs.get('node_health_states', None) self.application_health_states = kwargs.get('application_health_states', None) -class ClusterHealthChunk(Model): +class ClusterHealthChunk(msrest.serialization.Model): """Represents the health chunk of the cluster. - Contains the cluster aggregated health state, and the cluster entities that - respect the input filter. - - :param health_state: The HealthState representing the aggregated health - state of the cluster computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired cluster health policy and - the application health policies. Possible values include: 'Invalid', 'Ok', - 'Warning', 'Error', 'Unknown' +Contains the cluster aggregated health state, and the cluster entities that respect the input filter. + + :param health_state: The HealthState representing the aggregated health state of the cluster + computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). 
+ The aggregation is done by applying the desired cluster health policy and the application + health policies. Possible values include: "Invalid", "Ok", "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param node_health_state_chunks: The list of node health state chunks in - the cluster that respect the filters in the cluster health chunk query - description. - :type node_health_state_chunks: - ~azure.servicefabric.models.NodeHealthStateChunkList - :param application_health_state_chunks: The list of application health - state chunks in the cluster that respect the filters in the cluster health - chunk query description. + :param node_health_state_chunks: The list of node health state chunks in the cluster that + respect the filters in the cluster health chunk query description. + :type node_health_state_chunks: ~azure.servicefabric.models.NodeHealthStateChunkList + :param application_health_state_chunks: The list of application health state chunks in the + cluster that respect the filters in the cluster health chunk query description. :type application_health_state_chunks: ~azure.servicefabric.models.ApplicationHealthStateChunkList """ @@ -5538,49 +6072,41 @@ class ClusterHealthChunk(Model): 'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterHealthChunk, self).__init__(**kwargs) self.health_state = kwargs.get('health_state', None) self.node_health_state_chunks = kwargs.get('node_health_state_chunks', None) self.application_health_state_chunks = kwargs.get('application_health_state_chunks', None) -class ClusterHealthChunkQueryDescription(Model): - """The cluster health chunk query description, which can specify the health - policies to evaluate cluster health and very expressive filters to select - which cluster entities to include in response. 
+class ClusterHealthChunkQueryDescription(msrest.serialization.Model): + """The cluster health chunk query description, which can specify the health policies to evaluate cluster health and very expressive filters to select which cluster entities to include in response. - :param node_filters: Defines a list of filters that specify which nodes to - be included in the returned cluster health chunk. - If no filters are specified, no nodes are returned. All the nodes are used - to evaluate the cluster's aggregated health state, regardless of the input - filters. + :param node_filters: Defines a list of filters that specify which nodes to be included in the + returned cluster health chunk. + If no filters are specified, no nodes are returned. All the nodes are used to evaluate the + cluster's aggregated health state, regardless of the input filters. The cluster health chunk query may specify multiple node filters. - For example, it can specify a filter to return all nodes with health state - Error and another filter to always include a node identified by its - NodeName. - :type node_filters: - list[~azure.servicefabric.models.NodeHealthStateFilter] - :param application_filters: Defines a list of filters that specify which - applications to be included in the returned cluster health chunk. - If no filters are specified, no applications are returned. All the - applications are used to evaluate the cluster's aggregated health state, - regardless of the input filters. + For example, it can specify a filter to return all nodes with health state Error and another + filter to always include a node identified by its NodeName. + :type node_filters: list[~azure.servicefabric.models.NodeHealthStateFilter] + :param application_filters: Defines a list of filters that specify which applications to be + included in the returned cluster health chunk. + If no filters are specified, no applications are returned. 
All the applications are used to + evaluate the cluster's aggregated health state, regardless of the input filters. The cluster health chunk query may specify multiple application filters. - For example, it can specify a filter to return all applications with - health state Error and another filter to always include applications of a - specified application type. - :type application_filters: - list[~azure.servicefabric.models.ApplicationHealthStateFilter] - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param application_health_policies: Defines the application health policy - map used to evaluate the health of an application or one of its children - entities. - :type application_health_policies: - ~azure.servicefabric.models.ApplicationHealthPolicies + For example, it can specify a filter to return all applications with health state Error and + another filter to always include applications of a specified application type. + :type application_filters: list[~azure.servicefabric.models.ApplicationHealthStateFilter] + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param application_health_policies: Defines the application health policy map used to evaluate + the health of an application or one of its children entities. 
+ :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -5590,7 +6116,10 @@ class ClusterHealthChunkQueryDescription(Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterHealthChunkQueryDescription, self).__init__(**kwargs) self.node_filters = kwargs.get('node_filters', None) self.application_filters = kwargs.get('application_filters', None) @@ -5598,24 +6127,22 @@ def __init__(self, **kwargs): self.application_health_policies = kwargs.get('application_health_policies', None) -class ClusterHealthPolicies(Model): +class ClusterHealthPolicies(msrest.serialization.Model): """Health policies to evaluate cluster health. - :param application_health_policy_map: Defines a map that contains specific - application health policies for different applications. - Each entry specifies as key the application name and as value an - ApplicationHealthPolicy used to evaluate the application health. - If an application is not specified in the map, the application health - evaluation uses the ApplicationHealthPolicy found in its application - manifest or the default application health policy (if no health policy is - defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific application health + policies for different applications. + Each entry specifies as key the application name and as value an ApplicationHealthPolicy used + to evaluate the application health. + If an application is not specified in the map, the application health evaluation uses the + ApplicationHealthPolicy found in its application manifest or the default application health + policy (if no health policy is defined in the manifest). The map is empty by default. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy """ _attribute_map = { @@ -5623,71 +6150,106 @@ class ClusterHealthPolicies(Model): 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) self.cluster_health_policy = kwargs.get('cluster_health_policy', None) -class ClusterHealthPolicy(Model): - """Defines a health policy used to evaluate the health of the cluster or of a - cluster node. +class ClusterHealthPolicy(msrest.serialization.Model): + """Defines a health policy used to evaluate the health of the cluster or of a cluster node. - :param consider_warning_as_error: Indicates whether warnings are treated - with the same severity as errors. Default value: False . + :param consider_warning_as_error: Indicates whether warnings are treated with the same severity + as errors. :type consider_warning_as_error: bool - :param max_percent_unhealthy_nodes: The maximum allowed percentage of - unhealthy nodes before reporting an error. For example, to allow 10% of - nodes to be unhealthy, this value would be 10. - The percentage represents the maximum tolerated percentage of nodes that - can be unhealthy before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy node, - the health is evaluated as Warning. 
- The percentage is calculated by dividing the number of unhealthy nodes - over the total number of nodes in the cluster. - The computation rounds up to tolerate one failure on small numbers of - nodes. Default percentage is zero. - In large clusters, some nodes will always be down or out for repairs, so - this percentage should be configured to tolerate that. Default value: 0 . + :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes before + reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10. + + The percentage represents the maximum tolerated percentage of nodes that can be unhealthy + before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy node, the health is + evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes over the total number + of nodes in the cluster. + The computation rounds up to tolerate one failure on small numbers of nodes. Default + percentage is zero. + + In large clusters, some nodes will always be down or out for repairs, so this percentage + should be configured to tolerate that. :type max_percent_unhealthy_nodes: int - :param max_percent_unhealthy_applications: The maximum allowed percentage - of unhealthy applications before reporting an error. For example, to allow - 10% of applications to be unhealthy, this value would be 10. - The percentage represents the maximum tolerated percentage of applications - that can be unhealthy before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy - application, the health is evaluated as Warning. - This is calculated by dividing the number of unhealthy applications over - the total number of application instances in the cluster, excluding - applications of application types that are included in the - ApplicationTypeHealthPolicyMap. 
- The computation rounds up to tolerate one failure on small numbers of - applications. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy + applications before reporting an error. For example, to allow 10% of applications to be + unhealthy, this value would be 10. + + The percentage represents the maximum tolerated percentage of applications that can be + unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy application, the health is + evaluated as Warning. + This is calculated by dividing the number of unhealthy applications over the total number of + application instances in the cluster, excluding applications of application types that are + included in the ApplicationTypeHealthPolicyMap. + The computation rounds up to tolerate one failure on small numbers of applications. Default + percentage is zero. :type max_percent_unhealthy_applications: int - :param application_type_health_policy_map: Defines a map with max - percentage unhealthy applications for specific application types. - Each entry specifies as key the application type name and as value an - integer that represents the MaxPercentUnhealthyApplications percentage - used to evaluate the applications of the specified application type. - The application type health policy map can be used during cluster health - evaluation to describe special application types. - The application types included in the map are evaluated against the - percentage specified in the map, and not with the global - MaxPercentUnhealthyApplications defined in the cluster health policy. - The applications of application types specified in the map are not counted - against the global pool of applications. 
- For example, if some applications of a type are critical, the cluster - administrator can add an entry to the map for that application type + :param application_type_health_policy_map: Defines a map with max percentage unhealthy + applications for specific application types. + Each entry specifies as key the application type name and as value an integer that represents + the MaxPercentUnhealthyApplications percentage used to evaluate the applications of the + specified application type. + + The application type health policy map can be used during cluster health evaluation to + describe special application types. + The application types included in the map are evaluated against the percentage specified in + the map, and not with the global MaxPercentUnhealthyApplications defined in the cluster health + policy. + The applications of application types specified in the map are not counted against the global + pool of applications. + For example, if some applications of a type are critical, the cluster administrator can add an + entry to the map for that application type and assign it a value of 0% (that is, do not tolerate any failures). - All other applications can be evaluated with - MaxPercentUnhealthyApplications set to 20% to tolerate some failures out - of the thousands of application instances. - The application type health policy map is used only if the cluster - manifest enables application type health evaluation using the - configuration entry for + All other applications can be evaluated with MaxPercentUnhealthyApplications set to 20% to + tolerate some failures out of the thousands of application instances. + The application type health policy map is used only if the cluster manifest enables + application type health evaluation using the configuration entry for HealthManager/EnableApplicationTypeHealthEvaluation. 
:type application_type_health_policy_map: list[~azure.servicefabric.models.ApplicationTypeHealthPolicyMapItem] + :param node_type_health_policy_map: Defines a map with max percentage unhealthy nodes for + specific node types. + Each entry specifies as key the node type name and as value an integer that represents the + MaxPercentUnhealthyNodes percentage used to evaluate the nodes of the specified node type. + + The node type health policy map can be used during cluster health evaluation to describe + special node types. + They are evaluated against the percentages associated with their node type name in the map. + Setting this has no impact on the global pool of nodes used for MaxPercentUnhealthyNodes. + The node type health policy map is used only if the cluster manifest enables node type health + evaluation using the configuration entry for HealthManager/EnableNodeTypeHealthEvaluation. + + For example, given a cluster with many nodes of different types, with important work hosted on + node type "SpecialNodeType" that should not tolerate any nodes down. + You can specify global MaxPercentUnhealthyNodes to 20% to tolerate some failures for all + nodes, but for the node type "SpecialNodeType", set the MaxPercentUnhealthyNodes to 0 by + setting the value in the key value pair in NodeTypeHealthPolicyMapItem. The key is the node + type name. + This way, as long as no nodes of type "SpecialNodeType" are in Error state, + even if some of the many nodes in the global pool are in Error state, but below the global + unhealthy percentage, the cluster would be evaluated to Warning. + A Warning health state does not impact cluster upgrade or other monitoring triggered by Error + health state. + But even one node of type SpecialNodeType in Error would make cluster unhealthy (in Error + rather than Warning/Ok), which triggers rollback or pauses the cluster upgrade, depending on + the upgrade configuration. 
+ + Conversely, setting the global MaxPercentUnhealthyNodes to 0, and setting SpecialNodeType's + max percent unhealthy nodes to 100, + with one node of type SpecialNodeType in Error state would still put the cluster in an Error + state, since the global restriction is more strict in this case. + :type node_type_health_policy_map: + list[~azure.servicefabric.models.NodeTypeHealthPolicyMapItem] """ _attribute_map = { @@ -5695,14 +6257,19 @@ class ClusterHealthPolicy(Model): 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'application_type_health_policy_map': {'key': 'ApplicationTypeHealthPolicyMap', 'type': '[ApplicationTypeHealthPolicyMapItem]'}, + 'node_type_health_policy_map': {'key': 'NodeTypeHealthPolicyMap', 'type': '[NodeTypeHealthPolicyMapItem]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False) self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', 0) self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', 0) self.application_type_health_policy_map = kwargs.get('application_type_health_policy_map', None) + self.node_type_health_policy_map = kwargs.get('node_type_health_policy_map', None) class ClusterHealthReportExpiredEvent(ClusterEvent): @@ -5710,18 +6277,38 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -5734,17 +6321,16 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -5756,11 +6342,11 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -5771,33 +6357,32 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): 'source_utc_timestamp': {'key': 
'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterHealthReportExpiredEvent, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'ClusterHealthReportExpired' - - -class ClusterLoadInfo(Model): - """Information about load in a Service Fabric cluster. It holds a summary of - all metrics and their load in a cluster. - - :param last_balancing_start_time_utc: The starting time of last resource - balancing run. - :type last_balancing_start_time_utc: datetime - :param last_balancing_end_time_utc: The end time of last resource - balancing run. - :type last_balancing_end_time_utc: datetime - :param load_metric_information: List that contains metrics and their load - information in this cluster. - :type load_metric_information: - list[~azure.servicefabric.models.LoadMetricInformation] + self.kind = 'ClusterHealthReportExpired' # type: str + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] + + +class ClusterLoadInfo(msrest.serialization.Model): + """Information about load in a Service Fabric cluster. It holds a summary of all metrics and their load in a cluster. 
+ + :param last_balancing_start_time_utc: The starting time of last resource balancing run. + :type last_balancing_start_time_utc: ~datetime.datetime + :param last_balancing_end_time_utc: The end time of last resource balancing run. + :type last_balancing_end_time_utc: ~datetime.datetime + :param load_metric_information: List that contains metrics and their load information in this + cluster. + :type load_metric_information: list[~azure.servicefabric.models.LoadMetricInformation] """ _attribute_map = { @@ -5806,14 +6391,17 @@ class ClusterLoadInfo(Model): 'load_metric_information': {'key': 'LoadMetricInformation', 'type': '[LoadMetricInformation]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterLoadInfo, self).__init__(**kwargs) self.last_balancing_start_time_utc = kwargs.get('last_balancing_start_time_utc', None) self.last_balancing_end_time_utc = kwargs.get('last_balancing_end_time_utc', None) self.load_metric_information = kwargs.get('load_metric_information', None) -class ClusterManifest(Model): +class ClusterManifest(msrest.serialization.Model): """Information about the cluster manifest. :param manifest: The contents of the cluster manifest file. @@ -5824,7 +6412,10 @@ class ClusterManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterManifest, self).__init__(**kwargs) self.manifest = kwargs.get('manifest', None) @@ -5834,18 +6425,38 @@ class ClusterNewHealthReportEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -5858,17 +6469,16 @@ class ClusterNewHealthReportEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -5880,11 +6490,11 @@ class ClusterNewHealthReportEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -5895,17 +6505,20 @@ class ClusterNewHealthReportEvent(ClusterEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 
'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterNewHealthReportEvent, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'ClusterNewHealthReport' + self.kind = 'ClusterNewHealthReport' # type: str + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class ClusterUpgradeCompletedEvent(ClusterEvent): @@ -5913,102 +6526,115 @@ class ClusterUpgradeCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of - upgrade in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in + milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeCompletedEvent, self).__init__(**kwargs) - self.target_cluster_version = kwargs.get('target_cluster_version', None) - self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) - self.kind = 'ClusterUpgradeCompleted' + self.kind = 'ClusterUpgradeCompleted' # type: str + self.target_cluster_version = 
kwargs['target_cluster_version'] + self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] -class ClusterUpgradeDescriptionObject(Model): +class ClusterUpgradeDescriptionObject(msrest.serialization.Model): """Represents a ServiceFabric cluster upgrade. - :param config_version: The cluster configuration version (specified in the - cluster manifest). + :param config_version: The cluster configuration version (specified in the cluster manifest). :type config_version: str :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. 
- Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through - the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', - 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default - value: "Default" . + :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible + values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", + "ReverseLexicographical". Default value: "Default". :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param enable_delta_health_evaluation: When true, enables delta health - evaluation rather than absolute health evaluation after completion of each - upgrade domain. + :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than + absolute health evaluation after completion of each upgrade domain. 
:type enable_delta_health_evaluation: bool - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param cluster_upgrade_health_policy: Defines a health policy used to - evaluate the health of the cluster during a cluster upgrade. + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of + the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Represents the map of application - health policies for a ServiceFabric cluster upgrade + :param application_health_policy_map: Represents the map of application health policies for a + ServiceFabric cluster upgrade. 
:type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicyMapObject """ @@ -6028,14 +6654,17 @@ class ClusterUpgradeDescriptionObject(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicyMapObject'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeDescriptionObject, self).__init__(**kwargs) self.config_version = kwargs.get('config_version', None) self.code_version = kwargs.get('code_version', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) - self.force_restart = kwargs.get('force_restart', None) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) + self.force_restart = kwargs.get('force_restart', False) self.sort_order = kwargs.get('sort_order', "Default") self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) self.monitoring_policy = kwargs.get('monitoring_policy', None) @@ -6049,33 +6678,53 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain - upgrade in milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain upgrade in + milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_state': {'required': True}, 'upgrade_domains': {'required': True}, @@ -6083,46 +6732,44 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, 'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'}, 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): 
super(ClusterUpgradeDomainCompletedEvent, self).__init__(**kwargs) - self.target_cluster_version = kwargs.get('target_cluster_version', None) - self.upgrade_state = kwargs.get('upgrade_state', None) - self.upgrade_domains = kwargs.get('upgrade_domains', None) - self.upgrade_domain_elapsed_time_in_ms = kwargs.get('upgrade_domain_elapsed_time_in_ms', None) - self.kind = 'ClusterUpgradeDomainCompleted' + self.kind = 'ClusterUpgradeDomainCompleted' # type: str + self.target_cluster_version = kwargs['target_cluster_version'] + self.upgrade_state = kwargs['upgrade_state'] + self.upgrade_domains = kwargs['upgrade_domains'] + self.upgrade_domain_elapsed_time_in_ms = kwargs['upgrade_domain_elapsed_time_in_ms'] -class ClusterUpgradeHealthPolicyObject(Model): - """Defines a health policy used to evaluate the health of the cluster during a - cluster upgrade. +class ClusterUpgradeHealthPolicyObject(msrest.serialization.Model): + """Defines a health policy used to evaluate the health of the cluster during a cluster upgrade. - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage - of nodes health degradation allowed during cluster upgrades. The delta is - measured between the state of the nodes at the beginning of upgrade and - the state of the nodes at the time of the health evaluation. The check is - performed after every upgrade domain upgrade completion to make sure the - global state of the cluster is within tolerated limits. The default value - is 10%. + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of nodes health + degradation allowed during cluster upgrades. The delta is measured between the state of the + nodes at the beginning of upgrade and the state of the nodes at the time of the health + evaluation. The check is performed after every upgrade domain upgrade completion to make sure + the global state of the cluster is within tolerated limits. The default value is 10%. 
:type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum - allowed percentage of upgrade domain nodes health degradation allowed - during cluster upgrades. The delta is measured between the state of the - upgrade domain nodes at the beginning of upgrade and the state of the - upgrade domain nodes at the time of the health evaluation. The check is - performed after every upgrade domain upgrade completion for all completed - upgrade domains to make sure the state of the upgrade domains is within - tolerated limits. The default value is 15%. + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of + upgrade domain nodes health degradation allowed during cluster upgrades. The delta is measured + between the state of the upgrade domain nodes at the beginning of upgrade and the state of the + upgrade domain nodes at the time of the health evaluation. The check is performed after every + upgrade domain upgrade completion for all completed upgrade domains to make sure the state of + the upgrade domains is within tolerated limits. The default value is 15%. :type max_percent_upgrade_domain_delta_unhealthy_nodes: int """ @@ -6136,63 +6783,60 @@ class ClusterUpgradeHealthPolicyObject(Model): 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeHealthPolicyObject, self).__init__(**kwargs) self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) self.max_percent_upgrade_domain_delta_unhealthy_nodes = kwargs.get('max_percent_upgrade_domain_delta_unhealthy_nodes', None) -class ClusterUpgradeProgressObject(Model): +class ClusterUpgradeProgressObject(msrest.serialization.Model): """Information about a cluster upgrade. :param code_version: The ServiceFabric code version of the cluster. 
:type code_version: str - :param config_version: The cluster configuration version (specified in the - cluster manifest). + :param config_version: The cluster configuration version (specified in the cluster manifest). :type config_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values - include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', - 'RollingForwardPending', 'RollingForwardInProgress', - 'RollingForwardCompleted', 'Failed' + :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", + "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", + "RollingForwardInProgress", "RollingForwardCompleted", "Failed". :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be - processed. + :param next_upgrade_domain: The name of the next upgrade domain to be processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Represents a ServiceFabric cluster upgrade - :type upgrade_description: - ~azure.servicefabric.models.ClusterUpgradeDescriptionObject - :param upgrade_duration_in_milliseconds: The estimated elapsed time spent - processing the current overall upgrade. + :param upgrade_description: Represents a ServiceFabric cluster upgrade. + :type upgrade_description: ~azure.servicefabric.models.ClusterUpgradeDescriptionObject + :param upgrade_duration_in_milliseconds: The estimated elapsed time spent processing the + current overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time - spent processing the current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time spent processing the + current upgrade domain. :type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in - the current aggregated health state. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current - in-progress upgrade domain. + :param unhealthy_evaluations: List of health evaluations that resulted in the current + aggregated health state. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current in-progress upgrade + domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo :param start_timestamp_utc: The start time of the upgrade in UTC. :type start_timestamp_utc: str :param failure_timestamp_utc: The failure time of the upgrade in UTC. 
:type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in - FailureAction being executed. Possible values include: 'None', - 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', - 'OverallUpgradeTimeout' + :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being + executed. Possible values include: "None", "Interrupted", "HealthCheck", + "UpgradeDomainTimeout", "OverallUpgradeTimeout". :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: The detailed upgrade progress - for nodes in the current upgrade domain at the point of failure. + :param upgrade_domain_progress_at_failure: The detailed upgrade progress for nodes in the + current upgrade domain at the point of failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailedUpgradeDomainProgressObject """ @@ -6215,7 +6859,10 @@ class ClusterUpgradeProgressObject(Model): 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailedUpgradeDomainProgressObject'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeProgressObject, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) self.config_version = kwargs.get('config_version', None) @@ -6239,53 +6886,76 @@ class ClusterUpgradeRollbackCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of - upgrade in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in + milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeRollbackCompletedEvent, self).__init__(**kwargs) - self.target_cluster_version = kwargs.get('target_cluster_version', None) - self.failure_reason = kwargs.get('failure_reason', None) - 
self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) - self.kind = 'ClusterUpgradeRollbackCompleted' + self.kind = 'ClusterUpgradeRollbackCompleted' # type: str + self.target_cluster_version = kwargs['target_cluster_version'] + self.failure_reason = kwargs['failure_reason'] + self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] class ClusterUpgradeRollbackStartedEvent(ClusterEvent): @@ -6293,53 +6963,76 @@ class ClusterUpgradeRollbackStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of - upgrade in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in + milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeRollbackStartedEvent, self).__init__(**kwargs) - self.target_cluster_version = kwargs.get('target_cluster_version', None) - self.failure_reason = kwargs.get('failure_reason', None) - self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) - self.kind = 'ClusterUpgradeRollbackStarted' + self.kind = 'ClusterUpgradeRollbackStarted' # type: str + self.target_cluster_version = kwargs['target_cluster_version'] + self.failure_reason = kwargs['failure_reason'] + self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] class ClusterUpgradeStartedEvent(ClusterEvent): @@ -6347,18 +7040,38 @@ class ClusterUpgradeStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param current_cluster_version: Required. Current Cluster version. :type current_cluster_version: str :param target_cluster_version: Required. Target Cluster version. @@ -6372,9 +7085,9 @@ class ClusterUpgradeStartedEvent(ClusterEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'current_cluster_version': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_type': {'required': True}, @@ -6383,11 +7096,11 @@ class ClusterUpgradeStartedEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'current_cluster_version': {'key': 'CurrentClusterVersion', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_type': {'key': 'UpgradeType', 'type': 'str'}, @@ -6395,17 +7108,20 @@ class ClusterUpgradeStartedEvent(ClusterEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterUpgradeStartedEvent, self).__init__(**kwargs) - self.current_cluster_version = kwargs.get('current_cluster_version', None) - self.target_cluster_version = kwargs.get('target_cluster_version', None) - self.upgrade_type = 
kwargs.get('upgrade_type', None) - self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', None) - self.failure_action = kwargs.get('failure_action', None) - self.kind = 'ClusterUpgradeStarted' + self.kind = 'ClusterUpgradeStarted' # type: str + self.current_cluster_version = kwargs['current_cluster_version'] + self.target_cluster_version = kwargs['target_cluster_version'] + self.upgrade_type = kwargs['upgrade_type'] + self.rolling_upgrade_mode = kwargs['rolling_upgrade_mode'] + self.failure_action = kwargs['failure_action'] -class ClusterVersion(Model): +class ClusterVersion(msrest.serialization.Model): """The cluster version. :param version: The Service Fabric cluster runtime version. @@ -6416,38 +7132,37 @@ class ClusterVersion(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ClusterVersion, self).__init__(**kwargs) self.version = kwargs.get('version', None) -class CodePackageEntryPoint(Model): - """Information about setup or main entry point of a code package deployed on a - Service Fabric node. +class CodePackageEntryPoint(msrest.serialization.Model): + """Information about setup or main entry point of a code package deployed on a Service Fabric node. - :param entry_point_location: The location of entry point executable on the - node. + :param entry_point_location: The location of entry point executable on the node. :type entry_point_location: str :param process_id: The process ID of the entry point. :type process_id: str - :param run_as_user_name: The user name under which entry point executable - is run on the node. + :param run_as_user_name: The user name under which entry point executable is run on the node. :type run_as_user_name: str - :param code_package_entry_point_statistics: Statistics about setup or main - entry point of a code package deployed on a Service Fabric node. 
+ :param code_package_entry_point_statistics: Statistics about setup or main entry point of a + code package deployed on a Service Fabric node. :type code_package_entry_point_statistics: ~azure.servicefabric.models.CodePackageEntryPointStatistics - :param status: Specifies the status of the code package entry point - deployed on a Service Fabric node. Possible values include: 'Invalid', - 'Pending', 'Starting', 'Started', 'Stopping', 'Stopped' + :param status: Specifies the status of the code package entry point deployed on a Service + Fabric node. Possible values include: "Invalid", "Pending", "Starting", "Started", "Stopping", + "Stopped". :type status: str or ~azure.servicefabric.models.EntryPointStatus - :param next_activation_time: The time (in UTC) when the entry point - executable will be run next. - :type next_activation_time: datetime - :param instance_id: The instance ID for current running entry point. For a - code package setup entry point (if specified) runs first and after it - finishes main entry point is started. Each time entry point executable is - run, its instance id will change. + :param next_activation_time: The time (in UTC) when the entry point executable will be run + next. + :type next_activation_time: ~datetime.datetime + :param instance_id: The instance ID for current running entry point. For a code package setup + entry point (if specified) runs first and after it finishes main entry point is started. Each + time entry point executable is run, its instance id will change. 
:type instance_id: str """ @@ -6461,7 +7176,10 @@ class CodePackageEntryPoint(Model): 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CodePackageEntryPoint, self).__init__(**kwargs) self.entry_point_location = kwargs.get('entry_point_location', None) self.process_id = kwargs.get('process_id', None) @@ -6472,39 +7190,35 @@ def __init__(self, **kwargs): self.instance_id = kwargs.get('instance_id', None) -class CodePackageEntryPointStatistics(Model): - """Statistics about setup or main entry point of a code package deployed on a - Service Fabric node. +class CodePackageEntryPointStatistics(msrest.serialization.Model): + """Statistics about setup or main entry point of a code package deployed on a Service Fabric node. :param last_exit_code: The last exit code of the entry point. :type last_exit_code: str - :param last_activation_time: The last time (in UTC) when Service Fabric - attempted to run the entry point. - :type last_activation_time: datetime - :param last_exit_time: The last time (in UTC) when the entry point - finished running. - :type last_exit_time: datetime - :param last_successful_activation_time: The last time (in UTC) when the - entry point ran successfully. - :type last_successful_activation_time: datetime - :param last_successful_exit_time: The last time (in UTC) when the entry - point finished running gracefully. - :type last_successful_exit_time: datetime + :param last_activation_time: The last time (in UTC) when Service Fabric attempted to run the + entry point. + :type last_activation_time: ~datetime.datetime + :param last_exit_time: The last time (in UTC) when the entry point finished running. + :type last_exit_time: ~datetime.datetime + :param last_successful_activation_time: The last time (in UTC) when the entry point ran + successfully. 
+ :type last_successful_activation_time: ~datetime.datetime + :param last_successful_exit_time: The last time (in UTC) when the entry point finished running + gracefully. + :type last_successful_exit_time: ~datetime.datetime :param activation_count: Number of times the entry point has run. :type activation_count: str - :param activation_failure_count: Number of times the entry point failed to - run. + :param activation_failure_count: Number of times the entry point failed to run. :type activation_failure_count: str - :param continuous_activation_failure_count: Number of times the entry - point continuously failed to run. + :param continuous_activation_failure_count: Number of times the entry point continuously failed + to run. :type continuous_activation_failure_count: str :param exit_count: Number of times the entry point finished running. :type exit_count: str - :param exit_failure_count: Number of times the entry point failed to exit - gracefully. + :param exit_failure_count: Number of times the entry point failed to exit gracefully. :type exit_failure_count: str - :param continuous_exit_failure_count: Number of times the entry point - continuously failed to exit gracefully. + :param continuous_exit_failure_count: Number of times the entry point continuously failed to + exit gracefully. 
:type continuous_exit_failure_count: str """ @@ -6522,7 +7236,10 @@ class CodePackageEntryPointStatistics(Model): 'continuous_exit_failure_count': {'key': 'ContinuousExitFailureCount', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CodePackageEntryPointStatistics, self).__init__(**kwargs) self.last_exit_code = kwargs.get('last_exit_code', None) self.last_activation_time = kwargs.get('last_activation_time', None) @@ -6537,20 +7254,17 @@ def __init__(self, **kwargs): self.continuous_exit_failure_count = kwargs.get('continuous_exit_failure_count', None) -class ComposeDeploymentStatusInfo(Model): +class ComposeDeploymentStatusInfo(msrest.serialization.Model): """Information about a Service Fabric compose deployment. :param name: The name of the deployment. :type name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param status: The status of the compose deployment. Possible values - include: 'Invalid', 'Provisioning', 'Creating', 'Ready', 'Unprovisioning', - 'Deleting', 'Failed', 'Upgrading' + :param status: The status of the compose deployment. Possible values include: "Invalid", + "Provisioning", "Creating", "Ready", "Unprovisioning", "Deleting", "Failed", "Upgrading". :type status: str or ~azure.servicefabric.models.ComposeDeploymentStatus - :param status_details: The status details of compose deployment including - failure message. + :param status_details: The status details of compose deployment including failure message. 
:type status_details: str """ @@ -6561,7 +7275,10 @@ class ComposeDeploymentStatusInfo(Model): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ComposeDeploymentStatusInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.application_name = kwargs.get('application_name', None) @@ -6569,48 +7286,40 @@ def __init__(self, **kwargs): self.status_details = kwargs.get('status_details', None) -class ComposeDeploymentUpgradeDescription(Model): +class ComposeDeploymentUpgradeDescription(msrest.serialization.Model): """Describes the parameters for a compose deployment upgrade. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file - that describes the deployment to create. + :param compose_file_content: Required. The content of the compose file that describes the + deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container - registry. + :param registry_credential: Credential information to connect to container registry. :type registry_credential: ~azure.servicefabric.models.RegistryCredential - :param upgrade_kind: Required. The kind of upgrade out of the following - possible values. Possible values include: 'Invalid', 'Rolling'. Default - value: "Rolling" . + :param upgrade_kind: Required. The kind of upgrade out of the following possible values. + Possible values include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. 
Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. 
- :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy """ _validation = { @@ -6631,105 +7340,94 @@ class ComposeDeploymentUpgradeDescription(Model): 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ComposeDeploymentUpgradeDescription, self).__init__(**kwargs) - self.deployment_name = kwargs.get('deployment_name', None) - self.compose_file_content = kwargs.get('compose_file_content', None) + self.deployment_name = kwargs['deployment_name'] + self.compose_file_content = kwargs['compose_file_content'] self.registry_credential = kwargs.get('registry_credential', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) - self.force_restart = kwargs.get('force_restart', None) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) + self.force_restart = kwargs.get('force_restart', False) self.monitoring_policy = kwargs.get('monitoring_policy', None) self.application_health_policy = 
kwargs.get('application_health_policy', None) -class ComposeDeploymentUpgradeProgressInfo(Model): +class ComposeDeploymentUpgradeProgressInfo(msrest.serialization.Model): """Describes the parameters for a compose deployment upgrade. :param deployment_name: The name of the target deployment. :type deployment_name: str - :param application_name: The name of the target application, including the - 'fabric:' URI scheme. + :param application_name: The name of the target application, including the 'fabric:' URI + scheme. :type application_name: str - :param upgrade_state: The state of the compose deployment upgrade. - Possible values include: 'Invalid', 'ProvisioningTarget', - 'RollingForwardInProgress', 'RollingForwardPending', - 'UnprovisioningCurrent', 'RollingForwardCompleted', - 'RollingBackInProgress', 'UnprovisioningTarget', 'RollingBackCompleted', - 'Failed' - :type upgrade_state: str or - ~azure.servicefabric.models.ComposeDeploymentUpgradeState - :param upgrade_status_details: Additional detailed information about the - status of the pending upgrade. + :param upgrade_state: The state of the compose deployment upgrade. Possible values include: + "Invalid", "ProvisioningTarget", "RollingForwardInProgress", "RollingForwardPending", + "UnprovisioningCurrent", "RollingForwardCompleted", "RollingBackInProgress", + "UnprovisioningTarget", "RollingBackCompleted", "Failed". + :type upgrade_state: str or ~azure.servicefabric.models.ComposeDeploymentUpgradeState + :param upgrade_status_details: Additional detailed information about the status of the pending + upgrade. :type upgrade_status_details: str - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling". Default value: "Rolling". 
:type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. 
(unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param target_application_type_version: The target application type - version (found in the application manifest) for the application upgrade. + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param target_application_type_version: The target application type version (found in the + application manifest) for the application upgrade. :type target_application_type_version: str - :param upgrade_duration: The estimated amount of time that the overall - upgrade elapsed. It is first interpreted as a string representing an ISO - 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. + :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type upgrade_duration: str - :param current_upgrade_domain_duration: The estimated amount of time spent - processing current Upgrade Domain. It is first interpreted as a string - representing an ISO 8601 duration. 
If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param current_upgrade_domain_duration: The estimated amount of time spent processing current + Upgrade Domain. It is first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of milliseconds. :type current_upgrade_domain_duration: str - :param application_unhealthy_evaluations: List of health evaluations that - resulted in the current aggregated health state. + :param application_unhealthy_evaluations: List of health evaluations that resulted in the + current aggregated health state. :type application_unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current - in-progress upgrade domain. + :param current_upgrade_domain_progress: Information about the current in-progress upgrade + domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade - started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade - failed and FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and + FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in - FailureAction being executed. Possible values include: 'None', - 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', - 'OverallUpgradeTimeout' + :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being + executed. Possible values include: "None", "Interrupted", "HealthCheck", + "UpgradeDomainTimeout", "OverallUpgradeTimeout". 
:type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade - domain progress at the time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the + time of upgrade failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param application_upgrade_status_details: Additional details of - application upgrade including failure message. + :param application_upgrade_status_details: Additional details of application upgrade including + failure message. :type application_upgrade_status_details: str """ @@ -6756,7 +7454,10 @@ class ComposeDeploymentUpgradeProgressInfo(Model): 'application_upgrade_status_details': {'key': 'ApplicationUpgradeStatusDetails', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ComposeDeploymentUpgradeProgressInfo, self).__init__(**kwargs) self.deployment_name = kwargs.get('deployment_name', None) self.application_name = kwargs.get('application_name', None) @@ -6764,13 +7465,13 @@ def __init__(self, **kwargs): self.upgrade_status_details = kwargs.get('upgrade_status_details', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.force_restart = kwargs.get('force_restart', None) - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', False) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) self.monitoring_policy = kwargs.get('monitoring_policy', None) self.application_health_policy = kwargs.get('application_health_policy', None) self.target_application_type_version = kwargs.get('target_application_type_version', None) 
- self.upgrade_duration = kwargs.get('upgrade_duration', None) - self.current_upgrade_domain_duration = kwargs.get('current_upgrade_domain_duration', None) + self.upgrade_duration = kwargs.get('upgrade_duration', "PT0H2M0S") + self.current_upgrade_domain_duration = kwargs.get('current_upgrade_domain_duration', "PT0H2M0S") self.application_unhealthy_evaluations = kwargs.get('application_unhealthy_evaluations', None) self.current_upgrade_domain_progress = kwargs.get('current_upgrade_domain_progress', None) self.start_timestamp_utc = kwargs.get('start_timestamp_utc', None) @@ -6780,23 +7481,21 @@ def __init__(self, **kwargs): self.application_upgrade_status_details = kwargs.get('application_upgrade_status_details', None) -class ConfigParameterOverride(Model): +class ConfigParameterOverride(msrest.serialization.Model): """Information about a configuration parameter override. All required parameters must be populated in order to send to Azure. - :param section_name: Required. Name of the section for the parameter - override. + :param section_name: Required. Name of the section for the parameter override. :type section_name: str - :param parameter_name: Required. Name of the parameter that has been - overridden. + :param parameter_name: Required. Name of the parameter that has been overridden. :type parameter_name: str :param parameter_value: Required. Value of the overridden parameter. :type parameter_value: str :param timeout: The duration until config override is considered as valid. - :type timeout: timedelta - :param persist_across_upgrade: A value that indicates whether config - override will be removed on upgrade or will still be considered as valid. + :type timeout: ~datetime.timedelta + :param persist_across_upgrade: A value that indicates whether config override will be removed + on upgrade or will still be considered as valid. 
:type persist_across_upgrade: bool """ @@ -6814,28 +7513,31 @@ class ConfigParameterOverride(Model): 'persist_across_upgrade': {'key': 'PersistAcrossUpgrade', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ConfigParameterOverride, self).__init__(**kwargs) - self.section_name = kwargs.get('section_name', None) - self.parameter_name = kwargs.get('parameter_name', None) - self.parameter_value = kwargs.get('parameter_value', None) + self.section_name = kwargs['section_name'] + self.parameter_name = kwargs['parameter_name'] + self.parameter_value = kwargs['parameter_value'] self.timeout = kwargs.get('timeout', None) self.persist_across_upgrade = kwargs.get('persist_across_upgrade', None) -class ContainerApiRequestBody(Model): +class ContainerApiRequestBody(msrest.serialization.Model): """parameters for making container API call. All required parameters must be populated in order to send to Azure. - :param http_verb: HTTP verb of container REST API, defaults to "GET" + :param http_verb: HTTP verb of container REST API, defaults to "GET". :type http_verb: str - :param uri_path: Required. URI path of container REST API + :param uri_path: Required. URI path of container REST API. :type uri_path: str - :param content_type: Content type of container REST API request, defaults - to "application/json" + :param content_type: Content type of container REST API request, defaults to + "application/json". :type content_type: str - :param body: HTTP request body of container REST API + :param body: HTTP request body of container REST API. 
:type body: str """ @@ -6850,15 +7552,18 @@ class ContainerApiRequestBody(Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerApiRequestBody, self).__init__(**kwargs) self.http_verb = kwargs.get('http_verb', None) - self.uri_path = kwargs.get('uri_path', None) + self.uri_path = kwargs['uri_path'] self.content_type = kwargs.get('content_type', None) self.body = kwargs.get('body', None) -class ContainerApiResponse(Model): +class ContainerApiResponse(msrest.serialization.Model): """Response body that wraps container API result. All required parameters must be populated in order to send to Azure. @@ -6875,24 +7580,26 @@ class ContainerApiResponse(Model): 'container_api_result': {'key': 'ContainerApiResult', 'type': 'ContainerApiResult'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerApiResponse, self).__init__(**kwargs) - self.container_api_result = kwargs.get('container_api_result', None) + self.container_api_result = kwargs['container_api_result'] -class ContainerApiResult(Model): +class ContainerApiResult(msrest.serialization.Model): """Container API result. All required parameters must be populated in order to send to Azure. - :param status: Required. HTTP status code returned by the target container - API + :param status: Required. HTTP status code returned by the target container API. :type status: int - :param content_type: HTTP content type + :param content_type: HTTP content type. :type content_type: str - :param content_encoding: HTTP content encoding + :param content_encoding: HTTP content encoding. :type content_encoding: str - :param body: container API result body + :param body: container API result body. 
:type body: str """ @@ -6907,19 +7614,21 @@ class ContainerApiResult(Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerApiResult, self).__init__(**kwargs) - self.status = kwargs.get('status', None) + self.status = kwargs['status'] self.content_type = kwargs.get('content_type', None) self.content_encoding = kwargs.get('content_encoding', None) self.body = kwargs.get('body', None) -class ContainerCodePackageProperties(Model): +class ContainerCodePackageProperties(msrest.serialization.Model): """Describes a container and its runtime properties. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. @@ -6928,21 +7637,16 @@ class ContainerCodePackageProperties(Model): :param image: Required. The Container image to use. :type image: str :param image_registry_credential: Image registry credential. - :type image_registry_credential: - ~azure.servicefabric.models.ImageRegistryCredential + :type image_registry_credential: ~azure.servicefabric.models.ImageRegistryCredential :param entry_point: Override for the default entry point in the container. :type entry_point: str - :param commands: Command array to execute within the container in exec - form. + :param commands: Command array to execute within the container in exec form. :type commands: list[str] - :param environment_variables: The environment variables to set in this - container - :type environment_variables: - list[~azure.servicefabric.models.EnvironmentVariable] - :param settings: The settings to set in this container. The setting file - path can be fetched from environment variable "Fabric_SettingPath". The - path for Windows container is "C:\\\\secrets". The path for Linux - container is "/var/secrets". 
+ :param environment_variables: The environment variables to set in this container. + :type environment_variables: list[~azure.servicefabric.models.EnvironmentVariable] + :param settings: The settings to set in this container. The setting file path can be fetched + from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". + The path for Linux container is "/var/secrets". :type settings: list[~azure.servicefabric.models.Setting] :param labels: The labels to set in this container. :type labels: list[~azure.servicefabric.models.ContainerLabel] @@ -6950,26 +7654,24 @@ class ContainerCodePackageProperties(Model): :type endpoints: list[~azure.servicefabric.models.EndpointProperties] :param resources: Required. The resources required by this container. :type resources: ~azure.servicefabric.models.ResourceRequirements - :param volume_refs: Volumes to be attached to the container. The lifetime - of these volumes is independent of the application's lifetime. + :param volume_refs: Volumes to be attached to the container. The lifetime of these volumes is + independent of the application's lifetime. :type volume_refs: list[~azure.servicefabric.models.VolumeReference] - :param volumes: Volumes to be attached to the container. The lifetime of - these volumes is scoped to the application's lifetime. + :param volumes: Volumes to be attached to the container. The lifetime of these volumes is + scoped to the application's lifetime. :type volumes: list[~azure.servicefabric.models.ApplicationScopedVolume] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef - :param reliable_collections_refs: A list of ReliableCollection resources - used by this particular code package. Please refer to - ReliableCollectionsRef for more details. 
- :type reliable_collections_refs: - list[~azure.servicefabric.models.ReliableCollectionsRef] + :param reliable_collections_refs: A list of ReliableCollection resources used by this + particular code package. Please refer to ReliableCollectionsRef for more details. + :type reliable_collections_refs: list[~azure.servicefabric.models.ReliableCollectionsRef] :ivar instance_view: Runtime information of a container instance. :vartype instance_view: ~azure.servicefabric.models.ContainerInstanceView - :param liveness_probe: An array of liveness probes for a code package. It - determines when to restart a code package. + :param liveness_probe: An array of liveness probes for a code package. It determines when to + restart a code package. :type liveness_probe: list[~azure.servicefabric.models.Probe] - :param readiness_probe: An array of readiness probes for a code package. - It determines when to unpublish an endpoint. + :param readiness_probe: An array of readiness probes for a code package. It determines when to + unpublish an endpoint. 
:type readiness_probe: list[~azure.servicefabric.models.Probe] """ @@ -7000,10 +7702,13 @@ class ContainerCodePackageProperties(Model): 'readiness_probe': {'key': 'readinessProbe', 'type': '[Probe]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerCodePackageProperties, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.image = kwargs.get('image', None) + self.name = kwargs['name'] + self.image = kwargs['image'] self.image_registry_credential = kwargs.get('image_registry_credential', None) self.entry_point = kwargs.get('entry_point', None) self.commands = kwargs.get('commands', None) @@ -7011,7 +7716,7 @@ def __init__(self, **kwargs): self.settings = kwargs.get('settings', None) self.labels = kwargs.get('labels', None) self.endpoints = kwargs.get('endpoints', None) - self.resources = kwargs.get('resources', None) + self.resources = kwargs['resources'] self.volume_refs = kwargs.get('volume_refs', None) self.volumes = kwargs.get('volumes', None) self.diagnostics = kwargs.get('diagnostics', None) @@ -7021,7 +7726,7 @@ def __init__(self, **kwargs): self.readiness_probe = kwargs.get('readiness_probe', None) -class ContainerEvent(Model): +class ContainerEvent(msrest.serialization.Model): """A container event. :param name: The name of the container event. @@ -7032,7 +7737,7 @@ class ContainerEvent(Model): :type first_timestamp: str :param last_timestamp: Date/time of the last event. :type last_timestamp: str - :param message: The event message + :param message: The event message. :type message: str :param type: The event type. 
:type type: str @@ -7047,7 +7752,10 @@ class ContainerEvent(Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerEvent, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.count = kwargs.get('count', None) @@ -7062,44 +7770,66 @@ class ContainerInstanceEvent(FabricEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", 
"ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerInstanceEvent, self).__init__(**kwargs) - self.kind = 'ContainerInstanceEvent' + self.kind = 'ContainerInstanceEvent' # type: str -class ContainerInstanceView(Model): +class ContainerInstanceView(msrest.serialization.Model): """Runtime information of a container instance. - :param restart_count: The number of times the container has been - restarted. 
+ :param restart_count: The number of times the container has been restarted. :type restart_count: int :param current_state: Current container instance state. :type current_state: ~azure.servicefabric.models.ContainerState @@ -7116,7 +7846,10 @@ class ContainerInstanceView(Model): 'events': {'key': 'events', 'type': '[ContainerEvent]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerInstanceView, self).__init__(**kwargs) self.restart_count = kwargs.get('restart_count', None) self.current_state = kwargs.get('current_state', None) @@ -7124,7 +7857,7 @@ def __init__(self, **kwargs): self.events = kwargs.get('events', None) -class ContainerLabel(Model): +class ContainerLabel(msrest.serialization.Model): """Describes a container label. All required parameters must be populated in order to send to Azure. @@ -7145,13 +7878,16 @@ class ContainerLabel(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerLabel, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.value = kwargs.get('value', None) + self.name = kwargs['name'] + self.value = kwargs['value'] -class ContainerLogs(Model): +class ContainerLogs(msrest.serialization.Model): """Container logs. :param content: Container logs. @@ -7162,22 +7898,25 @@ class ContainerLogs(Model): 'content': {'key': 'Content', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerLogs, self).__init__(**kwargs) self.content = kwargs.get('content', None) -class ContainerState(Model): +class ContainerState(msrest.serialization.Model): """The container state. - :param state: The state of this container + :param state: The state of this container. :type state: str :param start_time: Date/time when the container state started. - :type start_time: datetime + :type start_time: ~datetime.datetime :param exit_code: The container exit code. 
:type exit_code: str :param finish_time: Date/time when the container state finished. - :type finish_time: datetime + :type finish_time: ~datetime.datetime :param detail_status: Human-readable status of this state. :type detail_status: str """ @@ -7190,7 +7929,10 @@ class ContainerState(Model): 'detail_status': {'key': 'detailStatus', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ContainerState, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.start_time = kwargs.get('start_time', None) @@ -7199,18 +7941,17 @@ def __init__(self, **kwargs): self.detail_status = kwargs.get('detail_status', None) -class CreateComposeDeploymentDescription(Model): +class CreateComposeDeploymentDescription(msrest.serialization.Model): """Defines description for creating a Service Fabric compose deployment. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file - that describes the deployment to create. + :param compose_file_content: Required. The content of the compose file that describes the + deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container - registry. + :param registry_credential: Credential information to connect to container registry. 
:type registry_credential: ~azure.servicefabric.models.RegistryCredential """ @@ -7225,22 +7966,23 @@ class CreateComposeDeploymentDescription(Model): 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CreateComposeDeploymentDescription, self).__init__(**kwargs) - self.deployment_name = kwargs.get('deployment_name', None) - self.compose_file_content = kwargs.get('compose_file_content', None) + self.deployment_name = kwargs['deployment_name'] + self.compose_file_content = kwargs['compose_file_content'] self.registry_credential = kwargs.get('registry_credential', None) -class CurrentUpgradeDomainProgressInfo(Model): +class CurrentUpgradeDomainProgressInfo(msrest.serialization.Model): """Information about the current in-progress upgrade domain. - :param domain_name: The name of the upgrade domain + :param domain_name: The name of the upgrade domain. :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their - statuses - :type node_upgrade_progress_list: - list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their statuses. + :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -7248,41 +7990,46 @@ class CurrentUpgradeDomainProgressInfo(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(CurrentUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = kwargs.get('domain_name', None) self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) -class DeactivationIntentDescription(Model): +class DeactivationIntentDescription(msrest.serialization.Model): """Describes the intent or reason for deactivating the node. 
- :param deactivation_intent: Describes the intent or reason for - deactivating the node. The possible values are following. Possible values - include: 'Pause', 'Restart', 'RemoveData' - :type deactivation_intent: str or - ~azure.servicefabric.models.DeactivationIntent + :param deactivation_intent: Describes the intent or reason for deactivating the node. The + possible values are following. Possible values include: "Pause", "Restart", "RemoveData". + :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent """ _attribute_map = { 'deactivation_intent': {'key': 'DeactivationIntent', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeactivationIntentDescription, self).__init__(**kwargs) self.deactivation_intent = kwargs.get('deactivation_intent', None) -class ExecutionPolicy(Model): +class ExecutionPolicy(msrest.serialization.Model): """The execution policy of the service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy + sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. Enumerates the execution policy types for services.Constant filled by + server. Possible values include: "Default", "RunToCompletion". 
+ :type type: str or ~azure.servicefabric.models.ExecutionPolicyType """ _validation = { @@ -7297,9 +8044,12 @@ class ExecutionPolicy(Model): 'type': {'Default': 'DefaultExecutionPolicy', 'RunToCompletion': 'RunToCompletionExecutionPolicy'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ExecutionPolicy, self).__init__(**kwargs) - self.type = None + self.type = None # type: Optional[str] class DefaultExecutionPolicy(ExecutionPolicy): @@ -7307,8 +8057,9 @@ class DefaultExecutionPolicy(ExecutionPolicy): All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. Enumerates the execution policy types for services.Constant filled by + server. Possible values include: "Default", "RunToCompletion". + :type type: str or ~azure.servicefabric.models.ExecutionPolicyType """ _validation = { @@ -7319,76 +8070,85 @@ class DefaultExecutionPolicy(ExecutionPolicy): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DefaultExecutionPolicy, self).__init__(**kwargs) - self.type = 'Default' + self.type = 'Default' # type: str class DeletePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that deletes a specified property if it - exists. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that deletes a specified property if it exists. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. 
Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeletePropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'Delete' + self.kind = 'Delete' # type: str class DeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta nodes, containing health evaluations - for each unhealthy node that impacted current aggregated health state. - Can be returned during cluster upgrade when the aggregated health state of - the cluster is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for delta nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. +Can be returned during cluster upgrade when the aggregated health state of the cluster is Warning or Error. 
+ + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param baseline_error_count: Number of nodes with aggregated heath state - Error in the health store at the beginning of the cluster upgrade. + :param baseline_error_count: Number of nodes with aggregated heath state Error in the health + store at the beginning of the cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of nodes in the health store at - the beginning of the cluster upgrade. + :param baseline_total_count: Total number of nodes in the health store at the beginning of the + cluster upgrade. 
:type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of - delta unhealthy nodes from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of delta unhealthy nodes + from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int :param total_count: Total number of nodes in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. - Includes all the unhealthy NodeHealthEvaluation that impacted the - aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. + Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -7396,9 +8156,9 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, 'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'}, @@ -7406,45 +8166,44 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeltaNodesCheckHealthEvaluation, self).__init__(**kwargs) + self.kind = 'DeltaNodesCheck' # type: str self.baseline_error_count = kwargs.get('baseline_error_count', None) 
self.baseline_total_count = kwargs.get('baseline_total_count', None) self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'DeltaNodesCheck' class DeployedApplicationHealth(EntityHealth): - """Information about the health of an application deployed on a Service Fabric - node. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Information about the health of an application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. 
+ :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the application deployed on the node whose health - information is described by this object. + :param name: Name of the application deployed on the node whose health information is described + by this object. :type name: str :param node_name: Name of the node where this application is deployed. :type node_name: str - :param deployed_service_package_health_states: Deployed service package - health states for the current deployed application as found in the health - store. + :param deployed_service_package_health_states: Deployed service package health states for the + current deployed application as found in the health store. :type deployed_service_package_health_states: list[~azure.servicefabric.models.DeployedServicePackageHealthState] """ @@ -7459,7 +8218,10 @@ class DeployedApplicationHealth(EntityHealth): 'deployed_service_package_health_states': {'key': 'DeployedServicePackageHealthStates', 'type': '[DeployedServicePackageHealthState]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.node_name = kwargs.get('node_name', None) @@ -7467,34 +8229,36 @@ def __init__(self, **kwargs): class DeployedApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed application, containing - information about the data and the algorithm used by the health store to - evaluate health. - - All required parameters must be populated in order to send to Azure. 
- - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a deployed application, containing information about the data and the algorithm used by the health store to evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str :param node_name: Name of the node where the application is deployed to. :type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the deployed application. - The types of the unhealthy evaluations can be - DeployedServicePackagesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the deployed application. + The types of the unhealthy evaluations can be DeployedServicePackagesHealthEvaluation or + EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -7502,20 +8266,23 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealthEvaluation, self).__init__(**kwargs) + self.kind = 'DeployedApplication' # type: str self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'DeployedApplication' class 
DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): @@ -7523,25 +8290,44 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", 
"ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -7559,17 +8345,16 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. 
Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -7584,11 +8369,11 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -7602,36 +8387,34 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealthReportExpiredEvent, self).__init__(**kwargs) - self.application_instance_id = kwargs.get('application_instance_id', None) - self.node_name = kwargs.get('node_name', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - 
self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'DeployedApplicationHealthReportExpired' + self.kind = 'DeployedApplicationHealthReportExpired' # type: str + self.application_instance_id = kwargs['application_instance_id'] + self.node_name = kwargs['node_name'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class DeployedApplicationHealthState(EntityHealthState): - """Represents the health state of a deployed application, which contains the - entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is - deployed. + """Represents the health state of a deployed application, which contains the entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is deployed. 
:type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str """ @@ -7641,27 +8424,27 @@ class DeployedApplicationHealthState(EntityHealthState): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealthState, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed application, which contains - the node where the application is deployed, the aggregated health state and - any deployed service packages that respect the chunk query description - filters. - - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + """Represents the health state chunk of a deployed application, which contains the node where the application is deployed, the aggregated health state and any deployed service packages that respect the chunk query description filters. + + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of node where the application is deployed. :type node_name: str - :param deployed_service_package_health_state_chunks: The list of deployed - service package health state chunks belonging to the deployed application - that respect the filters in the cluster health chunk query description. 
+ :param deployed_service_package_health_state_chunks: The list of deployed service package + health state chunks belonging to the deployed application that respect the filters in the + cluster health chunk query description. :type deployed_service_package_health_state_chunks: ~azure.servicefabric.models.DeployedServicePackageHealthStateChunkList """ @@ -7672,93 +8455,81 @@ class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_service_package_health_state_chunks': {'key': 'DeployedServicePackageHealthStateChunks', 'type': 'DeployedServicePackageHealthStateChunkList'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealthStateChunk, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.deployed_service_package_health_state_chunks = kwargs.get('deployed_service_package_health_state_chunks', None) -class DeployedApplicationHealthStateChunkList(Model): - """The list of deployed application health state chunks that respect the input - filters in the chunk query. Returned by get cluster health state chunks - query. +class DeployedApplicationHealthStateChunkList(msrest.serialization.Model): + """The list of deployed application health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param items: The list of deployed application health state chunks that - respect the input filters in the chunk query. - :type items: - list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] + :param items: The list of deployed application health state chunks that respect the input + filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedApplicationHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class DeployedApplicationHealthStateFilter(Model): - """Defines matching criteria to determine whether a deployed application - should be included as a child of an application in the cluster health - chunk. - The deployed applications are only returned if the parent application - matches a filter specified in the cluster health chunk query description. - One filter can match zero, one or multiple deployed applications, depending - on its properties. - - :param node_name_filter: The name of the node where the application is - deployed in order to match the filter. - If specified, the filter is applied only to the application deployed on - the specified node. - If the application is not deployed on the node with the specified name, no - deployed application is returned in the cluster health chunk based on this - filter. - Otherwise, the deployed application is included in the cluster health - chunk if it respects the other filter properties. - If not specified, all deployed applications that match the parent filters - (if any) are taken into consideration and matched against the other filter - members, like health state filter. +class DeployedApplicationHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a deployed application should be included as a child of an application in the cluster health chunk. +The deployed applications are only returned if the parent application matches a filter specified in the cluster health chunk query description. +One filter can match zero, one or multiple deployed applications, depending on its properties. 
+ + :param node_name_filter: The name of the node where the application is deployed in order to + match the filter. + If specified, the filter is applied only to the application deployed on the specified node. + If the application is not deployed on the node with the specified name, no deployed + application is returned in the cluster health chunk based on this filter. + Otherwise, the deployed application is included in the cluster health chunk if it respects the + other filter properties. + If not specified, all deployed applications that match the parent filters (if any) are taken + into consideration and matched against the other filter members, like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the - deployed applications. It allows selecting deployed applications if they - match the desired health states. - The possible values are integer value of one of the following health - states. Only deployed applications that match the filter are returned. All - deployed applications are used to evaluate the cluster aggregated health - state. - If not specified, default value is None, unless the node name is - specified. If the filter has default value and node name is specified, the - matching deployed application is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed applications - with HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. 
- - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the deployed applications. It + allows selecting deployed applications if they match the desired health states. + The possible values are integer value of one of the following health states. Only deployed + applications that match the filter are returned. All deployed applications are used to evaluate + the cluster aggregated health state. + If not specified, default value is None, unless the node name is specified. If the filter has + default value and node name is specified, the matching deployed application is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed applications with HealthState + value of OK (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param deployed_service_package_filters: Defines a list of filters that - specify which deployed service packages to be included in the returned - cluster health chunk as children of the parent deployed application. 
The - deployed service packages are returned only if the parent deployed + :param deployed_service_package_filters: Defines a list of filters that specify which deployed + service packages to be included in the returned cluster health chunk as children of the parent + deployed application. The deployed service packages are returned only if the parent deployed application matches a filter. - If the list is empty, no deployed service packages are returned. All the - deployed service packages are used to evaluate the parent deployed - application aggregated health state, regardless of the input filters. - The deployed application filter may specify multiple deployed service - package filters. - For example, it can specify a filter to return all deployed service - packages with health state Error and another filter to always include a - deployed service package on a node. + If the list is empty, no deployed service packages are returned. All the deployed service + packages are used to evaluate the parent deployed application aggregated health state, + regardless of the input filters. + The deployed application filter may specify multiple deployed service package filters. + For example, it can specify a filter to return all deployed service packages with health state + Error and another filter to always include a deployed service package on a node. 
:type deployed_service_package_filters: list[~azure.servicefabric.models.DeployedServicePackageHealthStateFilter] """ @@ -7769,47 +8540,45 @@ class DeployedApplicationHealthStateFilter(Model): 'deployed_service_package_filters': {'key': 'DeployedServicePackageFilters', 'type': '[DeployedServicePackageHealthStateFilter]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = kwargs.get('node_name_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) self.deployed_service_package_filters = kwargs.get('deployed_service_package_filters', None) -class DeployedApplicationInfo(Model): +class DeployedApplicationInfo(msrest.serialization.Model): """Information about application deployed on the node. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. 
+ :param type_name: The application type name as defined in the application manifest. :type type_name: str - :param status: The status of the application deployed on the node. - Following are the possible values. Possible values include: 'Invalid', - 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' + :param status: The status of the application deployed on the node. Following are the possible + values. Possible values include: "Invalid", "Downloading", "Activating", "Active", "Upgrading", + "Deactivating". :type status: str or ~azure.servicefabric.models.DeployedApplicationStatus - :param work_directory: The work directory of the application on the node. - The work directory can be used to store application data. + :param work_directory: The work directory of the application on the node. The work directory + can be used to store application data. :type work_directory: str - :param log_directory: The log directory of the application on the node. - The log directory can be used to store application logs. + :param log_directory: The log directory of the application on the node. The log directory can + be used to store application logs. :type log_directory: str - :param temp_directory: The temp directory of the application on the node. - The code packages belonging to the application are forked with this - directory set as their temporary directory. + :param temp_directory: The temp directory of the application on the node. The code packages + belonging to the application are forked with this directory set as their temporary directory. :type temp_directory: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -7824,7 +8593,10 @@ class DeployedApplicationInfo(Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -7841,25 +8613,44 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -7877,17 +8668,16 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -7902,11 +8692,11 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -7920,53 +8710,57 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationNewHealthReportEvent, 
self).__init__(**kwargs) - self.application_instance_id = kwargs.get('application_instance_id', None) - self.node_name = kwargs.get('node_name', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'DeployedApplicationNewHealthReport' + self.kind = 'DeployedApplicationNewHealthReport' # type: str + self.application_instance_id = kwargs['application_instance_id'] + self.node_name = kwargs['node_name'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class DeployedApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed applications, containing health - evaluations for each unhealthy deployed application that impacted current - aggregated health state. - Can be returned when evaluating application health and the aggregated - health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for deployed applications, containing health evaluations for each unhealthy deployed application that impacted current aggregated health state. +Can be returned when evaluating application health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str - :param max_percent_unhealthy_deployed_applications: Maximum allowed - percentage of unhealthy deployed applications from the - ApplicationHealthPolicy. + :param max_percent_unhealthy_deployed_applications: Maximum allowed percentage of unhealthy + deployed applications from the ApplicationHealthPolicy. :type max_percent_unhealthy_deployed_applications: int - :param total_count: Total number of deployed applications of the - application in the health store. + :param total_count: Total number of deployed applications of the application in the health + store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - DeployedApplicationHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy DeployedApplicationHealthEvaluation that impacted the + aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -7974,64 +8768,60 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_deployed_applications': {'key': 'MaxPercentUnhealthyDeployedApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedApplicationsHealthEvaluation, self).__init__(**kwargs) + self.kind = 'DeployedApplications' # type: str self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'DeployedApplications' -class DeployedCodePackageInfo(Model): +class DeployedCodePackageInfo(msrest.serialization.Model): """Information about code package deployed on a Service Fabric node. :param name: The name of the code package. :type name: str - :param version: The version of the code package specified in service - manifest. + :param version: The version of the code package specified in service manifest. :type version: str - :param service_manifest_name: The name of service manifest that specified - this code package. + :param service_manifest_name: The name of service manifest that specified this code package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. 
If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_type: Specifies the type of host for main entry point of a - code package as specified in service manifest. Possible values include: - 'Invalid', 'ExeHost', 'ContainerHost' + :param host_type: Specifies the type of host for main entry point of a code package as + specified in service manifest. Possible values include: "Invalid", "ExeHost", "ContainerHost". :type host_type: str or ~azure.servicefabric.models.HostType - :param host_isolation_mode: Specifies the isolation mode of main entry - point of a code package when it's host type is ContainerHost. This is - specified as part of container host policies in application manifest while - importing service manifest. Possible values include: 'None', 'Process', - 'HyperV' - :type host_isolation_mode: str or - ~azure.servicefabric.models.HostIsolationMode - :param status: Specifies the status of a deployed application or service - package on a Service Fabric node. Possible values include: 'Invalid', - 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', - 'RanToCompletion', 'Failed' + :param host_isolation_mode: Specifies the isolation mode of main entry point of a code package + when it's host type is ContainerHost. This is specified as part of container host policies in + application manifest while importing service manifest. Possible values include: "None", + "Process", "HyperV". 
+ :type host_isolation_mode: str or ~azure.servicefabric.models.HostIsolationMode + :param status: Specifies the status of a deployed application or service package on a Service + Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", + "Upgrading", "Deactivating", "RanToCompletion", "Failed". :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param run_frequency_interval: The interval at which code package is run. - This is used for periodic code package. + :param run_frequency_interval: The interval at which code package is run. This is used for + periodic code package. :type run_frequency_interval: str - :param setup_entry_point: Information about setup or main entry point of a - code package deployed on a Service Fabric node. + :param setup_entry_point: Information about setup or main entry point of a code package + deployed on a Service Fabric node. :type setup_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint - :param main_entry_point: Information about setup or main entry point of a - code package deployed on a Service Fabric node. + :param main_entry_point: Information about setup or main entry point of a code package deployed + on a Service Fabric node. :type main_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint """ @@ -8048,7 +8838,10 @@ class DeployedCodePackageInfo(Model): 'main_entry_point': {'key': 'MainEntryPoint', 'type': 'CodePackageEntryPoint'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedCodePackageInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.version = kwargs.get('version', None) @@ -8063,28 +8856,24 @@ def __init__(self, **kwargs): class DeployedServicePackageHealth(EntityHealth): - """Information about the health of a service package for a specific - application deployed on a Service Fabric node. 
- - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Information about the health of a service package for a specific application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. 
:type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: Name of the service manifest. :type service_manifest_name: str @@ -8102,7 +8891,10 @@ class DeployedServicePackageHealth(EntityHealth): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealth, self).__init__(**kwargs) self.application_name = kwargs.get('application_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -8110,36 +8902,36 @@ def __init__(self, **kwargs): class DeployedServicePackageHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed service package, containing - information about the data and the algorithm used by health store to - evaluate health. The evaluation is returned only when the aggregated health - state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a deployed service package, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: The name of the service manifest. :type service_manifest_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state. The type of the unhealthy evaluations - can be EventHealthEvaluation. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state. The type of the unhealthy evaluations can be EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8147,22 +8939,25 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealthEvaluation, self).__init__(**kwargs) + self.kind = 'DeployedServicePackage' # type: str self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'DeployedServicePackage' class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): @@ -8170,33 +8965,50 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_manifest: Required. Service manifest name. :type service_manifest: str - :param service_package_instance_id: Required. Id of Service package - instance. + :param service_package_instance_id: Required. Id of Service package instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package - activation. + :param service_package_activation_id: Required. Id of Service package activation. :type service_package_activation_id: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str @@ -8212,17 +9024,16 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -8239,11 +9050,11 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest': {'key': 'ServiceManifest', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -8259,47 +9070,43 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealthReportExpiredEvent, self).__init__(**kwargs) - self.service_manifest = kwargs.get('service_manifest', None) - self.service_package_instance_id = kwargs.get('service_package_instance_id', None) - self.service_package_activation_id = 
kwargs.get('service_package_activation_id', None) - self.node_name = kwargs.get('node_name', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'DeployedServicePackageHealthReportExpired' + self.kind = 'DeployedServicePackageHealthReportExpired' # type: str + self.service_manifest = kwargs['service_manifest'] + self.service_package_instance_id = kwargs['service_package_instance_id'] + self.service_package_activation_id = kwargs['service_package_activation_id'] + self.node_name = kwargs['node_name'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class DeployedServicePackageHealthState(EntityHealthState): - """Represents the health state of a deployed service package, containing the - entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is - deployed. 
+ """Represents the health state of a deployed service package, containing the entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is deployed. :type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_manifest_name: Name of the manifest describing the service - package. + :param service_manifest_name: Name of the manifest describing the service package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. 
:type service_package_activation_id: str """ @@ -8312,7 +9119,10 @@ class DeployedServicePackageHealthState(EntityHealthState): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealthState, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) @@ -8321,21 +9131,18 @@ def __init__(self, **kwargs): class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed service package, which - contains the service manifest name and the service package aggregated - health state. + """Represents the health state chunk of a deployed service package, which contains the service manifest name and the service package aggregated health state. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param service_manifest_name: The name of the service manifest. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. 
If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -8346,88 +9153,79 @@ class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealthStateChunk, self).__init__(**kwargs) self.service_manifest_name = kwargs.get('service_manifest_name', None) self.service_package_activation_id = kwargs.get('service_package_activation_id', None) -class DeployedServicePackageHealthStateChunkList(Model): - """The list of deployed service package health state chunks that respect the - input filters in the chunk query. Returned by get cluster health state - chunks query. +class DeployedServicePackageHealthStateChunkList(msrest.serialization.Model): + """The list of deployed service package health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param items: The list of deployed service package health state chunks - that respect the input filters in the chunk query. - :type items: - list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] + :param items: The list of deployed service package health state chunks that respect the input + filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedServicePackageHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class DeployedServicePackageHealthStateFilter(Model): - """Defines matching criteria to determine whether a deployed service package - should be included as a child of a deployed application in the cluster - health chunk. - The deployed service packages are only returned if the parent entities - match a filter specified in the cluster health chunk query description. The - parent deployed application and its parent application must be included in - the cluster health chunk. - One filter can match zero, one or multiple deployed service packages, - depending on its properties. - - :param service_manifest_name_filter: The name of the service manifest - which identifies the deployed service packages that matches the filter. - If specified, the filter is applied only to the specified deployed service - packages, if any. - If no deployed service packages with specified manifest name exist, - nothing is returned in the cluster health chunk based on this filter. - If any deployed service package exists, they are included in the cluster - health chunk if it respects the other filter properties. - If not specified, all deployed service packages that match the parent - filters (if any) are taken into consideration and matched against the - other filter members, like health state filter. +class DeployedServicePackageHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a deployed service package should be included as a child of a deployed application in the cluster health chunk. 
+The deployed service packages are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent deployed application and its parent application must be included in the cluster health chunk. +One filter can match zero, one or multiple deployed service packages, depending on its properties. + + :param service_manifest_name_filter: The name of the service manifest which identifies the + deployed service packages that matches the filter. + If specified, the filter is applied only to the specified deployed service packages, if any. + If no deployed service packages with specified manifest name exist, nothing is returned in the + cluster health chunk based on this filter. + If any deployed service package exists, they are included in the cluster health chunk if it + respects the other filter properties. + If not specified, all deployed service packages that match the parent filters (if any) are + taken into consideration and matched against the other filter members, like health state + filter. :type service_manifest_name_filter: str - :param service_package_activation_id_filter: The activation ID of a - deployed service package that matches the filter. - If not specified, the filter applies to all deployed service packages that - match the other parameters. - If specified, the filter matches only the deployed service package with - the specified activation ID. + :param service_package_activation_id_filter: The activation ID of a deployed service package + that matches the filter. + If not specified, the filter applies to all deployed service packages that match the other + parameters. + If specified, the filter matches only the deployed service package with the specified + activation ID. :type service_package_activation_id_filter: str - :param health_state_filter: The filter for the health state of the - deployed service packages. 
It allows selecting deployed service packages - if they match the desired health states. - The possible values are integer value of one of the following health - states. Only deployed service packages that match the filter are returned. - All deployed service packages are used to evaluate the parent deployed - application aggregated health state. - If not specified, default value is None, unless the deployed service - package ID is specified. If the filter has default value and deployed - service package ID is specified, the matching deployed service package is - returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed service - packages with HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the deployed service packages. + It allows selecting deployed service packages if they match the desired health states. + The possible values are integer value of one of the following health states. Only deployed + service packages that match the filter are returned. All deployed service packages are used to + evaluate the parent deployed application aggregated health state. + If not specified, default value is None, unless the deployed service package ID is specified. 
+ If the filter has default value and deployed service package ID is specified, the matching + deployed service package is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed service packages with HealthState + value of OK (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int """ @@ -8437,32 +9235,31 @@ class DeployedServicePackageHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageHealthStateFilter, self).__init__(**kwargs) self.service_manifest_name_filter = kwargs.get('service_manifest_name_filter', None) self.service_package_activation_id_filter = kwargs.get('service_package_activation_id_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) -class DeployedServicePackageInfo(Model): +class DeployedServicePackageInfo(msrest.serialization.Model): """Information about service package deployed on a Service Fabric node. - :param name: The name of the service package as specified in the service - manifest. + :param name: The name of the service package as specified in the service manifest. :type name: str - :param version: The version of the service package specified in service - manifest. 
+ :param version: The version of the service package specified in service manifest. :type version: str - :param status: Specifies the status of a deployed application or service - package on a Service Fabric node. Possible values include: 'Invalid', - 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', - 'RanToCompletion', 'Failed' + :param status: Specifies the status of a deployed application or service package on a Service + Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", + "Upgrading", "Deactivating", "RanToCompletion", "Failed". :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -8474,7 +9271,10 @@ class DeployedServicePackageInfo(Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.version = kwargs.get('version', None) @@ -8487,33 +9287,50 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_manifest_name: Required. Service manifest name. :type service_manifest_name: str - :param service_package_instance_id: Required. Id of Service package - instance. + :param service_package_instance_id: Required. Id of Service package instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package - activation. + :param service_package_activation_id: Required. Id of Service package activation. :type service_package_activation_id: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str @@ -8529,17 +9346,16 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest_name': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -8556,11 +9372,11 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -8576,52 +9392,55 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackageNewHealthReportEvent, self).__init__(**kwargs) - self.service_manifest_name = kwargs.get('service_manifest_name', None) - self.service_package_instance_id = kwargs.get('service_package_instance_id', None) - 
self.service_package_activation_id = kwargs.get('service_package_activation_id', None) - self.node_name = kwargs.get('node_name', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'DeployedServicePackageNewHealthReport' + self.kind = 'DeployedServicePackageNewHealthReport' # type: str + self.service_manifest_name = kwargs['service_manifest_name'] + self.service_package_instance_id = kwargs['service_package_instance_id'] + self.service_package_activation_id = kwargs['service_package_activation_id'] + self.node_name = kwargs['node_name'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class DeployedServicePackagesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed service packages, containing - health evaluations for each unhealthy deployed service package that - impacted current aggregated health state. Can be returned when evaluating - deployed application health and the aggregated health state is either Error - or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for deployed service packages, containing health evaluations for each unhealthy deployed service package that impacted current aggregated health state. Can be returned when evaluating deployed application health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str - :param total_count: Total number of deployed service packages of the - deployed application in the health store. + :param total_count: Total number of deployed service packages of the deployed application in + the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - DeployedServicePackageHealthEvaluation that impacted the aggregated - health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy DeployedServicePackageHealthEvaluation that impacted the + aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8629,52 +9448,51 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServicePackagesHealthEvaluation, self).__init__(**kwargs) + self.kind = 'DeployedServicePackages' # type: str self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'DeployedServicePackages' -class DeployedServiceReplicaDetailInfo(Model): +class DeployedServiceReplicaDetailInfo(msrest.serialization.Model): """Information about a Service Fabric service replica deployed on a node. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: DeployedStatefulServiceReplicaDetailInfo, - DeployedStatelessServiceInstanceDetailInfo + sub-classes are: DeployedStatefulServiceReplicaDetailInfo, DeployedStatelessServiceInstanceDetailInfo. All required parameters must be populated in order to send to Azure. - :param service_name: Full hierarchical name of the service in URI format - starting with `fabric:`. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: Full hierarchical name of the service in URI format starting with + ``fabric:``. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle - operation on a stateful service replica or stateless service instance. - Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', - 'Abort' - :type current_service_operation: str or - ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the - current service operation in UTC format. 
- :type current_service_operation_start_time_utc: datetime + :param current_service_operation: Specifies the current active life-cycle operation on a + stateful service replica or stateless service instance. Possible values include: "Unknown", + "None", "Open", "ChangeRole", "Close", "Abort". + :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the current service + operation in UTC format. + :type current_service_operation_start_time_utc: ~datetime.datetime :param reported_load: List of load reported by replica. - :type reported_load: - list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] """ _validation = { @@ -8682,75 +9500,71 @@ class DeployedServiceReplicaDetailInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaDetailInfo', 'Stateless': 'DeployedStatelessServiceInstanceDetailInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServiceReplicaDetailInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) self.current_service_operation = kwargs.get('current_service_operation', None) 
self.current_service_operation_start_time_utc = kwargs.get('current_service_operation_start_time_utc', None) self.reported_load = kwargs.get('reported_load', None) - self.service_kind = None -class DeployedServiceReplicaInfo(Model): +class DeployedServiceReplicaInfo(msrest.serialization.Model): """Information about a Service Fabric service replica deployed on a node. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeployedStatefulServiceReplicaInfo, - DeployedStatelessServiceInstanceInfo + sub-classes are: DeployedStatefulServiceReplicaInfo, DeployedStatelessServiceInstanceInfo. All required parameters must be populated in order to send to Azure. - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this - replica. + :param code_package_name: The name of the code package that hosts this replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. 
The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or - ChangeRole. + :param address: The last address returned by the replica in Open or ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the - replica. This will be zero if the replica is down. In hyper-v containers - this host process ID will be from different kernel. 
+ :param host_process_id: Host process ID of the process that is hosting the replica. This will + be zero if the replica is down. In hyper-v containers this host process ID will be from + different kernel. :type host_process_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str """ _validation = { @@ -8758,6 +9572,7 @@ class DeployedServiceReplicaInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -8767,15 +9582,18 @@ class DeployedServiceReplicaInfo(Model): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServiceReplicaInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.service_name = kwargs.get('service_name', None) self.service_type_name = kwargs.get('service_type_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -8785,31 +9603,25 @@ def __init__(self, **kwargs): self.address = kwargs.get('address', None) self.service_package_activation_id = kwargs.get('service_package_activation_id', None) self.host_process_id = kwargs.get('host_process_id', None) - self.service_kind = None -class DeployedServiceTypeInfo(Model): - """Information about service type deployed on a node, information such as the - status of the service type registration on a node. 
+class DeployedServiceTypeInfo(msrest.serialization.Model): + """Information about service type deployed on a node, information such as the status of the service type registration on a node. - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that registered the - service type. + :param code_package_name: The name of the code package that registered the service type. :type code_package_name: str - :param status: The status of the service type registration on the node. - Possible values include: 'Invalid', 'Disabled', 'Enabled', 'Registered' - :type status: str or - ~azure.servicefabric.models.ServiceTypeRegistrationStatus - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param status: The status of the service type registration on the node. Possible values + include: "Invalid", "Disabled", "Enabled", "Registered". + :type status: str or ~azure.servicefabric.models.ServiceTypeRegistrationStatus + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. 
:type service_package_activation_id: str """ @@ -8822,7 +9634,10 @@ class DeployedServiceTypeInfo(Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedServiceTypeInfo, self).__init__(**kwargs) self.service_type_name = kwargs.get('service_type_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -8832,71 +9647,54 @@ def __init__(self, **kwargs): class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateful replica running in a code package. Note - DeployedServiceReplicaQueryResult will contain duplicate data like - ServiceKind, ServiceName, PartitionId and replicaId. + """Information about a stateful replica running in a code package. Note DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and replicaId. All required parameters must be populated in order to send to Azure. - :param service_name: Full hierarchical name of the service in URI format - starting with `fabric:`. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: Full hierarchical name of the service in URI format starting with + ``fabric:``. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. 
+ This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle - operation on a stateful service replica or stateless service instance. - Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', - 'Abort' - :type current_service_operation: str or - ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the - current service operation in UTC format. - :type current_service_operation_start_time_utc: datetime + :param current_service_operation: Specifies the current active life-cycle operation on a + stateful service replica or stateless service instance. Possible values include: "Unknown", + "None", "Open", "ChangeRole", "Close", "Abort". + :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the current service + operation in UTC format. + :type current_service_operation_start_time_utc: ~datetime.datetime :param reported_load: List of load reported by replica. - :type reported_load: - list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. 
+ :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str - :param current_replicator_operation: Specifies the operation currently - being executed by the Replicator. Possible values include: 'Invalid', - 'None', 'Open', 'ChangeRole', 'UpdateEpoch', 'Close', 'Abort', - 'OnDataLoss', 'WaitForCatchup', 'Build' - :type current_replicator_operation: str or - ~azure.servicefabric.models.ReplicatorOperationName - :param read_status: Specifies the access status of the partition. Possible - values include: 'Invalid', 'Granted', 'ReconfigurationPending', - 'NotPrimary', 'NoWriteQuorum' - :type read_status: str or - ~azure.servicefabric.models.PartitionAccessStatus - :param write_status: Specifies the access status of the partition. - Possible values include: 'Invalid', 'Granted', 'ReconfigurationPending', - 'NotPrimary', 'NoWriteQuorum' - :type write_status: str or - ~azure.servicefabric.models.PartitionAccessStatus - :param replicator_status: Represents a base class for primary or secondary - replicator status. - Contains information about the service fabric replicator like the - replication/copy queue utilization, last acknowledgement received - timestamp, etc. + :param current_replicator_operation: Specifies the operation currently being executed by the + Replicator. Possible values include: "Invalid", "None", "Open", "ChangeRole", "UpdateEpoch", + "Close", "Abort", "OnDataLoss", "WaitForCatchup", "Build". 
+ :type current_replicator_operation: str or ~azure.servicefabric.models.ReplicatorOperationName + :param read_status: Specifies the access status of the partition. Possible values include: + "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". + :type read_status: str or ~azure.servicefabric.models.PartitionAccessStatus + :param write_status: Specifies the access status of the partition. Possible values include: + "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". + :type write_status: str or ~azure.servicefabric.models.PartitionAccessStatus + :param replicator_status: Represents a base class for primary or secondary replicator status. + Contains information about the service fabric replicator like the replication/copy queue + utilization, last acknowledgement received timestamp, etc. :type replicator_status: ~azure.servicefabric.models.ReplicatorStatus - :param replica_status: Key value store related information for the - replica. - :type replica_status: - ~azure.servicefabric.models.KeyValueStoreReplicaStatus - :param deployed_service_replica_query_result: Information about a stateful - service replica deployed on a node. + :param replica_status: Key value store related information for the replica. + :type replica_status: ~azure.servicefabric.models.KeyValueStoreReplicaStatus + :param deployed_service_replica_query_result: Information about a stateful service replica + deployed on a node. 
:type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatefulServiceReplicaInfo """ @@ -8906,12 +9704,12 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'current_replicator_operation': {'key': 'CurrentReplicatorOperation', 'type': 'str'}, 'read_status': {'key': 'ReadStatus', 'type': 'str'}, @@ -8921,8 +9719,12 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatefulServiceReplicaInfo'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) self.current_replicator_operation = kwargs.get('current_replicator_operation', None) self.read_status = kwargs.get('read_status', None) @@ -8930,7 +9732,6 @@ def __init__(self, **kwargs): self.replicator_status = kwargs.get('replicator_status', None) self.replica_status = kwargs.get('replica_status', None) self.deployed_service_replica_query_result = kwargs.get('deployed_service_replica_query_result', None) - self.service_kind = 'Stateful' class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): @@ -8938,61 +9739,50 @@ class 
DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this - replica. + :param code_package_name: The name of the code package that hosts this replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. 
Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or - ChangeRole. + :param address: The last address returned by the replica in Open or ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the - replica. This will be zero if the replica is down. In hyper-v containers - this host process ID will be from different kernel. + :param host_process_id: Host process ID of the process that is hosting the replica. This will + be zero if the replica is down. In hyper-v containers this host process ID will be from + different kernel. :type host_process_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. 
If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str - :param replica_role: The role of a replica of a stateful service. Possible - values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', - 'ActiveSecondary' + :param replica_role: The role of a replica of a stateful service. Possible values include: + "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_information: Information about current - reconfiguration like phase, type, previous configuration role of replica - and reconfiguration start date time. - :type reconfiguration_information: - ~azure.servicefabric.models.ReconfigurationInformation + :param reconfiguration_information: Information about current reconfiguration like phase, type, + previous configuration role of replica and reconfiguration start date time. 
+ :type reconfiguration_information: ~azure.servicefabric.models.ReconfigurationInformation """ _validation = { @@ -9000,6 +9790,7 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9009,58 +9800,54 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'reconfiguration_information': {'key': 'ReconfigurationInformation', 'type': 'ReconfigurationInformation'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedStatefulServiceReplicaInfo, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) self.replica_role = kwargs.get('replica_role', None) self.reconfiguration_information = kwargs.get('reconfiguration_information', None) - self.service_kind = 'Stateful' class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateless instance running in a code package. Note that - DeployedServiceReplicaQueryResult will contain duplicate data like - ServiceKind, ServiceName, PartitionId and InstanceId. + """Information about a stateless instance running in a code package. Note that DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and InstanceId. All required parameters must be populated in order to send to Azure. 
- :param service_name: Full hierarchical name of the service in URI format - starting with `fabric:`. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: Full hierarchical name of the service in URI format starting with + ``fabric:``. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle - operation on a stateful service replica or stateless service instance. - Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', - 'Abort' - :type current_service_operation: str or - ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the - current service operation in UTC format. - :type current_service_operation_start_time_utc: datetime + :param current_service_operation: Specifies the current active life-cycle operation on a + stateful service replica or stateless service instance. Possible values include: "Unknown", + "None", "Open", "ChangeRole", "Close", "Abort". 
+ :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the current service + operation in UTC format. + :type current_service_operation_start_time_utc: ~datetime.datetime :param reported_load: List of load reported by replica. - :type reported_load: - list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. :type instance_id: str - :param deployed_service_replica_query_result: Information about a - stateless service instance deployed on a node. + :param deployed_service_replica_query_result: Information about a stateless service instance + deployed on a node. 
:type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatelessServiceInstanceInfo """ @@ -9070,21 +9857,24 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatelessServiceInstanceInfo'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) self.deployed_service_replica_query_result = kwargs.get('deployed_service_replica_query_result', None) - self.service_kind = 'Stateless' class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): @@ -9092,49 +9882,42 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: The full name of the service with 'fabric:' URI scheme. 
:type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this - replica. + :param code_package_name: The name of the code package that hosts this replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or - ChangeRole. + :param address: The last address returned by the replica in Open or ChangeRole. 
:type address: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the - replica. This will be zero if the replica is down. In hyper-v containers - this host process ID will be from different kernel. + :param host_process_id: Host process ID of the process that is hosting the replica. This will + be zero if the replica is down. In hyper-v containers this host process ID will be from + different kernel. :type host_process_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. 
:type instance_id: str """ @@ -9143,6 +9926,7 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9152,36 +9936,36 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployedStatelessServiceInstanceInfo, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) - self.service_kind = 'Stateless' -class DeployServicePackageToNodeDescription(Model): - """Defines description for downloading packages associated with a service - manifest to image cache on a Service Fabric node. +class DeployServicePackageToNodeDescription(msrest.serialization.Model): + """Defines description for downloading packages associated with a service manifest to image cache on a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest whose - packages need to be downloaded. + :param service_manifest_name: Required. The name of service manifest whose packages need to be + downloaded. :type service_manifest_name: str - :param application_type_name: Required. The application type name as - defined in the application manifest. + :param application_type_name: Required. The application type name as defined in the application + manifest. 
:type application_type_name: str - :param application_type_version: Required. The version of the application - type as defined in the application manifest. + :param application_type_version: Required. The version of the application type as defined in + the application manifest. :type application_type_version: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param package_sharing_policy: List of package sharing policy information. - :type package_sharing_policy: - list[~azure.servicefabric.models.PackageSharingPolicyInfo] + :type package_sharing_policy: list[~azure.servicefabric.models.PackageSharingPolicyInfo] """ _validation = { @@ -9199,24 +9983,27 @@ class DeployServicePackageToNodeDescription(Model): 'package_sharing_policy': {'key': 'PackageSharingPolicy', 'type': '[PackageSharingPolicyInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DeployServicePackageToNodeDescription, self).__init__(**kwargs) - self.service_manifest_name = kwargs.get('service_manifest_name', None) - self.application_type_name = kwargs.get('application_type_name', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.node_name = kwargs.get('node_name', None) + self.service_manifest_name = kwargs['service_manifest_name'] + self.application_type_name = kwargs['application_type_name'] + self.application_type_version = kwargs['application_type_version'] + self.node_name = kwargs['node_name'] self.package_sharing_policy = kwargs.get('package_sharing_policy', None) -class DiagnosticsDescription(Model): +class DiagnosticsDescription(msrest.serialization.Model): """Describes the diagnostics options available. :param sinks: List of supported sinks that can be referenced. :type sinks: list[~azure.servicefabric.models.DiagnosticsSinkProperties] :param enabled: Status of whether or not sinks are enabled. 
:type enabled: bool - :param default_sink_refs: The sinks to be used if diagnostics is enabled. - Sink choices can be overridden at the service and code package level. + :param default_sink_refs: The sinks to be used if diagnostics is enabled. Sink choices can be + overridden at the service and code package level. :type default_sink_refs: list[str] """ @@ -9226,20 +10013,23 @@ class DiagnosticsDescription(Model): 'default_sink_refs': {'key': 'defaultSinkRefs', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DiagnosticsDescription, self).__init__(**kwargs) self.sinks = kwargs.get('sinks', None) self.enabled = kwargs.get('enabled', None) self.default_sink_refs = kwargs.get('default_sink_refs', None) -class DiagnosticsRef(Model): +class DiagnosticsRef(msrest.serialization.Model): """Reference to sinks in DiagnosticsDescription. :param enabled: Status of whether or not sinks are enabled. :type enabled: bool - :param sink_refs: List of sinks to be used if enabled. References the list - of sinks in DiagnosticsDescription. + :param sink_refs: List of sinks to be used if enabled. References the list of sinks in + DiagnosticsDescription. :type sink_refs: list[str] """ @@ -9248,21 +10038,23 @@ class DiagnosticsRef(Model): 'sink_refs': {'key': 'sinkRefs', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DiagnosticsRef, self).__init__(**kwargs) self.enabled = kwargs.get('enabled', None) self.sink_refs = kwargs.get('sink_refs', None) -class DisableBackupDescription(Model): - """It describes the body parameters while disabling backup of a backup - entity(Application/Service/Partition). +class DisableBackupDescription(msrest.serialization.Model): + """It describes the body parameters while disabling backup of a backup entity(Application/Service/Partition). All required parameters must be populated in order to send to Azure. - :param clean_backup: Required. 
Boolean flag to delete backups. It can be - set to true for deleting all the backups which were created for the backup - entity that is getting disabled for backup. + :param clean_backup: Required. Boolean flag to delete backups. It can be set to true for + deleting all the backups which were created for the backup entity that is getting disabled for + backup. :type clean_backup: bool """ @@ -9274,17 +10066,20 @@ class DisableBackupDescription(Model): 'clean_backup': {'key': 'CleanBackup', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DisableBackupDescription, self).__init__(**kwargs) - self.clean_backup = kwargs.get('clean_backup', None) + self.clean_backup = kwargs['clean_backup'] -class DiskInfo(Model): +class DiskInfo(msrest.serialization.Model): """Information about the disk. - :param capacity: the disk size in bytes + :param capacity: the disk size in bytes. :type capacity: str - :param available_space: the available disk space in bytes + :param available_space: the available disk space in bytes. :type available_space: str """ @@ -9293,7 +10088,10 @@ class DiskInfo(Model): 'available_space': {'key': 'AvailableSpace', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DiskInfo, self).__init__(**kwargs) self.capacity = kwargs.get('capacity', None) self.available_space = kwargs.get('available_space', None) @@ -9304,8 +10102,10 @@ class DoublePropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. 
:type data: float """ @@ -9320,27 +10120,31 @@ class DoublePropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DoublePropertyValue, self).__init__(**kwargs) - self.data = kwargs.get('data', None) - self.kind = 'Double' + self.kind = 'Double' # type: str + self.data = kwargs['data'] class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Dsms Azure blob store used for storing and - enumerating backups. + """Describes the parameters for Dsms Azure blob store used for storing and enumerating backups. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str - :param storage_credentials_source_location: Required. The source location - of the storage credentials to connect to the Dsms Azure blob store. + :param storage_credentials_source_location: Required. The source location of the storage + credentials to connect to the Dsms Azure blob store. :type storage_credentials_source_location: str - :param container_name: Required. The name of the container in the blob - store to store and enumerate backups from. + :param container_name: Required. The name of the container in the blob store to store and + enumerate backups from. 
:type container_name: str """ @@ -9351,26 +10155,29 @@ class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_credentials_source_location': {'key': 'StorageCredentialsSourceLocation', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(DsmsAzureBlobBackupStorageDescription, self).__init__(**kwargs) - self.storage_credentials_source_location = kwargs.get('storage_credentials_source_location', None) - self.container_name = kwargs.get('container_name', None) - self.storage_kind = 'DsmsAzureBlobStore' + self.storage_kind = 'DsmsAzureBlobStore' # type: str + self.storage_credentials_source_location = kwargs['storage_credentials_source_location'] + self.container_name = kwargs['container_name'] -class EnableBackupDescription(Model): +class EnableBackupDescription(msrest.serialization.Model): """Specifies the parameters needed to enable periodic backup. All required parameters must be populated in order to send to Azure. - :param backup_policy_name: Required. Name of the backup policy to be used - for enabling periodic backups. + :param backup_policy_name: Required. Name of the backup policy to be used for enabling periodic + backups. 
:type backup_policy_name: str """ @@ -9382,12 +10189,15 @@ class EnableBackupDescription(Model): 'backup_policy_name': {'key': 'BackupPolicyName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EnableBackupDescription, self).__init__(**kwargs) - self.backup_policy_name = kwargs.get('backup_policy_name', None) + self.backup_policy_name = kwargs['backup_policy_name'] -class EndpointProperties(Model): +class EndpointProperties(msrest.serialization.Model): """Describes a container endpoint. All required parameters must be populated in order to send to Azure. @@ -9407,13 +10217,16 @@ class EndpointProperties(Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EndpointProperties, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.port = kwargs.get('port', None) -class EndpointRef(Model): +class EndpointRef(msrest.serialization.Model): """Describes a reference to a service endpoint. :param name: Name of the endpoint. @@ -9424,23 +10237,28 @@ class EndpointRef(Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EndpointRef, self).__init__(**kwargs) self.name = kwargs.get('name', None) -class SafetyCheck(Model): - """Represents a safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service - and the reliability of the state. +class SafetyCheck(msrest.serialization.Model): + """Represents a safety check performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionSafetyCheck, SeedNodeSafetyCheck + sub-classes are: SeedNodeSafetyCheck, PartitionSafetyCheck. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind """ _validation = { @@ -9452,30 +10270,32 @@ class SafetyCheck(Model): } _subtype_map = { - 'kind': {'PartitionSafetyCheck': 'PartitionSafetyCheck', 'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck'} + 'kind': {'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck', 'PartitionSafetyCheck': 'PartitionSafetyCheck'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SafetyCheck, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class PartitionSafetyCheck(SafetyCheck): - """Represents a safety check for the service partition being performed by - service fabric before continuing with operations. + """Represents a safety check for the service partition being performed by service fabric before continuing with operations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: EnsureAvailabilitySafetyCheck, - EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, - WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, - WaitForReconfigurationSafetyCheck + sub-classes are: EnsureAvailabilitySafetyCheck, EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, WaitForReconfigurationSafetyCheck. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -9492,23 +10312,27 @@ class PartitionSafetyCheck(SafetyCheck): 'kind': {'EnsureAvailability': 'EnsureAvailabilitySafetyCheck', 'EnsurePartitionQuorum': 'EnsurePartitionQuorumSafetyCheck', 'WaitForInbuildReplica': 'WaitForInbuildReplicaSafetyCheck', 'WaitForPrimaryPlacement': 'WaitForPrimaryPlacementSafetyCheck', 'WaitForPrimarySwap': 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfiguration': 'WaitForReconfigurationSafetyCheck'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionSafetyCheck, self).__init__(**kwargs) + self.kind = 'PartitionSafetyCheck' # type: str self.partition_id = kwargs.get('partition_id', None) - self.kind = 'PartitionSafetyCheck' class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): - """Safety check that waits to ensure the availability of the partition. It - waits until there are replicas available such that bringing down this - replica will not cause availability loss for the partition. + """Safety check that waits to ensure the availability of the partition. 
It waits until there are replicas available such that bringing down this replica will not cause availability loss for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -9521,21 +10345,26 @@ class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EnsureAvailabilitySafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsureAvailability' + self.kind = 'EnsureAvailability' # type: str class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): - """Safety check that ensures that a quorum of replicas are not lost for a - partition. + """Safety check that ensures that a quorum of replicas are not lost for a partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. 
These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -9548,21 +10377,22 @@ class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EnsurePartitionQuorumSafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsurePartitionQuorum' + self.kind = 'EnsurePartitionQuorum' # type: str -class EntityKindHealthStateCount(Model): +class EntityKindHealthStateCount(msrest.serialization.Model): """Represents health state count for entities of the specified entity kind. - :param entity_kind: The entity kind for which health states are evaluated. - Possible values include: 'Invalid', 'Node', 'Partition', 'Service', - 'Application', 'Replica', 'DeployedApplication', 'DeployedServicePackage', - 'Cluster' + :param entity_kind: The entity kind for which health states are evaluated. Possible values + include: "Invalid", "Node", "Partition", "Service", "Application", "Replica", + "DeployedApplication", "DeployedServicePackage", "Cluster". :type entity_kind: str or ~azure.servicefabric.models.EntityKind - :param health_state_count: The health state count for the entities of the - specified kind. + :param health_state_count: The health state count for the entities of the specified kind. 
:type health_state_count: ~azure.servicefabric.models.HealthStateCount """ @@ -9571,23 +10401,25 @@ class EntityKindHealthStateCount(Model): 'health_state_count': {'key': 'HealthStateCount', 'type': 'HealthStateCount'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EntityKindHealthStateCount, self).__init__(**kwargs) self.entity_kind = kwargs.get('entity_kind', None) self.health_state_count = kwargs.get('health_state_count', None) -class EnvironmentVariable(Model): +class EnvironmentVariable(msrest.serialization.Model): """Describes an environment variable for the container. - :param type: The type of the environment variable being given in value. - Possible values include: 'ClearText', 'KeyVaultReference', - 'SecretValueReference'. Default value: "ClearText" . + :param type: The type of the environment variable being given in value. Possible values + include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". :type type: str or ~azure.servicefabric.models.EnvironmentVariableType :param name: The name of the environment variable. :type name: str - :param value: The value of the environment variable, will be processed - based on the type provided. + :param value: The value of the environment variable, will be processed based on the type + provided. :type value: str """ @@ -9597,28 +10429,26 @@ class EnvironmentVariable(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EnvironmentVariable, self).__init__(**kwargs) self.type = kwargs.get('type', "ClearText") self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) -class Epoch(Model): - """An Epoch is a configuration number for the partition as a whole. 
When the - configuration of the replica set changes, for example when the Primary - replica changes, the operations that are replicated from the new Primary - replica are said to be a new Epoch from the ones which were sent by the old - Primary replica. +class Epoch(msrest.serialization.Model): + """An Epoch is a configuration number for the partition as a whole. When the configuration of the replica set changes, for example when the Primary replica changes, the operations that are replicated from the new Primary replica are said to be a new Epoch from the ones which were sent by the old Primary replica. - :param configuration_version: The current configuration number of this - Epoch. The configuration number is an increasing value that is updated - whenever the configuration of this replica set changes. + :param configuration_version: The current configuration number of this Epoch. The configuration + number is an increasing value that is updated whenever the configuration of this replica set + changes. :type configuration_version: str - :param data_loss_version: The current data loss number of this Epoch. The - data loss number property is an increasing value which is updated whenever - data loss is suspected, as when loss of a quorum of replicas in the - replica set that includes the Primary replica. + :param data_loss_version: The current data loss number of this Epoch. The data loss number + property is an increasing value which is updated whenever data loss is suspected, as when loss + of a quorum of replicas in the replica set that includes the Primary replica. 
:type data_loss_version: str """ @@ -9627,38 +10457,43 @@ class Epoch(Model): 'data_loss_version': {'key': 'DataLossVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(Epoch, self).__init__(**kwargs) self.configuration_version = kwargs.get('configuration_version', None) self.data_loss_version = kwargs.get('data_loss_version', None) class EventHealthEvaluation(HealthEvaluation): - """Represents health evaluation of a HealthEvent that was reported on the - entity. - The health evaluation is returned when evaluating health of an entity - results in Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation of a HealthEvent that was reported on the entity. +The health evaluation is returned when evaluating health of an entity results in Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param consider_warning_as_error: Indicates whether warnings are treated - with the same severity as errors. The field is specified in the health - policy used to evaluate the entity. + :param consider_warning_as_error: Indicates whether warnings are treated with the same severity + as errors. The field is specified in the health policy used to evaluate the entity. :type consider_warning_as_error: bool - :param unhealthy_event: Represents health information reported on a health - entity, such as cluster, application or node, with additional metadata - added by the Health Manager. + :param unhealthy_event: Represents health information reported on a health entity, such as + cluster, application or node, with additional metadata added by the Health Manager. 
:type unhealthy_event: ~azure.servicefabric.models.HealthEvent """ @@ -9667,152 +10502,158 @@ class EventHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, 'unhealthy_event': {'key': 'UnhealthyEvent', 'type': 'HealthEvent'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(EventHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Event' # type: str self.consider_warning_as_error = kwargs.get('consider_warning_as_error', None) self.unhealthy_event = kwargs.get('unhealthy_event', None) - self.kind = 'Event' class ExecutingFaultsChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos has decided on the - faults for an iteration. This Chaos event contains the details of the - faults as a list of strings. + """Describes a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param faults: List of string description of the faults that Chaos decided - to execute in an iteration. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. 
+ :type time_stamp_utc: ~datetime.datetime + :param faults: List of string description of the faults that Chaos decided to execute in an + iteration. :type faults: list[str] """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'faults': {'key': 'Faults', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ExecutingFaultsChaosEvent, self).__init__(**kwargs) + self.kind = 'ExecutingFaults' # type: str self.faults = kwargs.get('faults', None) - self.kind = 'ExecutingFaults' -class ProvisionApplicationTypeDescriptionBase(Model): - """Represents the type of registration or provision requested, and if the - operation needs to be asynchronous or not. Supported types of provision - operations are from either image store or external store. +class ProvisionApplicationTypeDescriptionBase(msrest.serialization.Model): + """Represents the type of registration or provision requested, and if the operation needs to be asynchronous or not. Supported types of provision operations are from either image store or external store. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ProvisionApplicationTypeDescription, - ExternalStoreProvisionApplicationTypeDescription + sub-classes are: ExternalStoreProvisionApplicationTypeDescription, ProvisionApplicationTypeDescription. All required parameters must be populated in order to send to Azure. - :param async_property: Required. Indicates whether or not provisioning - should occur asynchronously. When set to true, the provision operation - returns when the request is accepted by the system, and the provision - operation continues without any timeout limit. The default value is false. 
- For large application packages, we recommend setting the value to true. + :param kind: Required. The kind of application type registration or provision requested. The + application package can be registered or provisioned either from the image store or from an + external store. Following are the kinds of the application type provision.Constant filled by + server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". + :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind + :param async_property: Required. Indicates whether or not provisioning should occur + asynchronously. When set to true, the provision operation returns when the request is accepted + by the system, and the provision operation continues without any timeout limit. The default + value is false. For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { - 'async_property': {'required': True}, 'kind': {'required': True}, + 'async_property': {'required': True}, } _attribute_map = { - 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, } _subtype_map = { - 'kind': {'ImageStorePath': 'ProvisionApplicationTypeDescription', 'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription'} + 'kind': {'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription', 'ImageStorePath': 'ProvisionApplicationTypeDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProvisionApplicationTypeDescriptionBase, self).__init__(**kwargs) - self.async_property = kwargs.get('async_property', None) - self.kind = None + self.kind = None # type: Optional[str] + self.async_property = kwargs['async_property'] class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes 
the operation to register or provision an application type using - an application package from an external store instead of a package uploaded - to the Service Fabric image store. + """Describes the operation to register or provision an application type using an application package from an external store instead of a package uploaded to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param async_property: Required. Indicates whether or not provisioning - should occur asynchronously. When set to true, the provision operation - returns when the request is accepted by the system, and the provision - operation continues without any timeout limit. The default value is false. - For large application packages, we recommend setting the value to true. + :param kind: Required. The kind of application type registration or provision requested. The + application package can be registered or provisioned either from the image store or from an + external store. Following are the kinds of the application type provision.Constant filled by + server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". + :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind + :param async_property: Required. Indicates whether or not provisioning should occur + asynchronously. When set to true, the provision operation returns when the request is accepted + by the system, and the provision operation continues without any timeout limit. The default + value is false. For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_package_download_uri: Required. The path to the - '.sfpkg' application package from where the application package can be - downloaded using HTTP or HTTPS protocols. 
The application package can be - stored in an external store that provides GET operation to download the - file. Supported protocols are HTTP and HTTPS, and the path must allow READ - access. + :param application_package_download_uri: Required. The path to the '.sfpkg' application package + from where the application package can be downloaded using HTTP or HTTPS protocols. The + application package can be stored in an external store that provides GET operation to download + the file. Supported protocols are HTTP and HTTPS, and the path must allow READ access. :type application_package_download_uri: str - :param application_type_name: Required. The application type name - represents the name of the application type found in the application - manifest. + :param application_type_name: Required. The application type name represents the name of the + application type found in the application manifest. :type application_type_name: str - :param application_type_version: Required. The application type version - represents the version of the application type found in the application - manifest. + :param application_type_version: Required. The application type version represents the version + of the application type found in the application manifest. 
:type application_type_version: str """ _validation = { - 'async_property': {'required': True}, 'kind': {'required': True}, + 'async_property': {'required': True}, 'application_package_download_uri': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { - 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, 'application_package_download_uri': {'key': 'ApplicationPackageDownloadUri', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(**kwargs) - self.application_package_download_uri = kwargs.get('application_package_download_uri', None) - self.application_type_name = kwargs.get('application_type_name', None) - self.application_type_version = kwargs.get('application_type_version', None) - self.kind = 'ExternalStore' + self.kind = 'ExternalStore' # type: str + self.application_package_download_uri = kwargs['application_package_download_uri'] + self.application_type_name = kwargs['application_type_name'] + self.application_type_version = kwargs['application_type_version'] -class FabricCodeVersionInfo(Model): +class FabricCodeVersionInfo(msrest.serialization.Model): """Information about a Service Fabric code version. :param code_version: The product version of Service Fabric. 
@@ -9823,12 +10664,15 @@ class FabricCodeVersionInfo(Model): 'code_version': {'key': 'CodeVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FabricCodeVersionInfo, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) -class FabricConfigVersionInfo(Model): +class FabricConfigVersionInfo(msrest.serialization.Model): """Information about a Service Fabric config version. :param config_version: The config version of Service Fabric. @@ -9839,20 +10683,20 @@ class FabricConfigVersionInfo(Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FabricConfigVersionInfo, self).__init__(**kwargs) self.config_version = kwargs.get('config_version', None) -class FabricError(Model): - """The REST API operations for Service Fabric return standard HTTP status - codes. This type defines the additional information returned from the - Service Fabric API operations that are not successful. +class FabricError(msrest.serialization.Model): + """The REST API operations for Service Fabric return standard HTTP status codes. This type defines the additional information returned from the Service Fabric API operations that are not successful. All required parameters must be populated in order to send to Azure. - :param error: Required. Error object containing error code and error - message. + :param error: Required. Error object containing error code and error message. :type error: ~azure.servicefabric.models.FabricErrorError """ @@ -9864,184 +10708,182 @@ class FabricError(Model): 'error': {'key': 'Error', 'type': 'FabricErrorError'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FabricError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class FabricErrorException(HttpOperationError): - """Server responsed with exception of type: 'FabricError'. 
- - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args) + self.error = kwargs['error'] -class FabricErrorError(Model): +class FabricErrorError(msrest.serialization.Model): """Error object containing error code and error message. All required parameters must be populated in order to send to Azure. - :param code: Required. Defines the fabric error codes that be returned as - part of the error object in response to Service Fabric API operations that - are not successful. Following are the error code values that can be - returned for a specific HTTP status code. - - Possible values of the error code for HTTP status code 400 (Bad Request) - - "FABRIC_E_INVALID_PARTITION_KEY" - - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - - "FABRIC_E_INVALID_ADDRESS" - - "FABRIC_E_APPLICATION_NOT_UPGRADING" - - "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - - "FABRIC_E_FABRIC_NOT_UPGRADING" - - "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - - "FABRIC_E_INVALID_CONFIGURATION" - - "FABRIC_E_INVALID_NAME_URI" - - "FABRIC_E_PATH_TOO_LONG" - - "FABRIC_E_KEY_TOO_LARGE" - - "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - - "FABRIC_E_INVALID_ATOMIC_GROUP" - - "FABRIC_E_VALUE_EMPTY" - - "FABRIC_E_BACKUP_IS_ENABLED" - - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - - "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - - "E_INVALIDARG" - - Possible values of the error code for HTTP status code 404 (Not Found) - - "FABRIC_E_NODE_NOT_FOUND" - - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - - "FABRIC_E_APPLICATION_NOT_FOUND" - - "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - - "FABRIC_E_SERVICE_DOES_NOT_EXIST" - - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - - "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - - "FABRIC_E_PARTITION_NOT_FOUND" - - "FABRIC_E_REPLICA_DOES_NOT_EXIST" - - 
"FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - - "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - - "FABRIC_E_DIRECTORY_NOT_FOUND" - - "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - - "FABRIC_E_FILE_NOT_FOUND" - - "FABRIC_E_NAME_DOES_NOT_EXIST" - - "FABRIC_E_PROPERTY_DOES_NOT_EXIST" - - "FABRIC_E_ENUMERATION_COMPLETED" - - "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - - "FABRIC_E_KEY_NOT_FOUND" - - "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - - "FABRIC_E_BACKUP_NOT_ENABLED" - - "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - - "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - - Possible values of the error code for HTTP status code 409 (Conflict) - - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - - "FABRIC_E_APPLICATION_ALREADY_EXISTS" - - "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - - "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - - "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - - "FABRIC_E_SERVICE_ALREADY_EXISTS" - - "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - - "FABRIC_E_APPLICATION_TYPE_IN_USE" - - "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - - "FABRIC_E_FABRIC_VERSION_IN_USE" - - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - - "FABRIC_E_NAME_ALREADY_EXISTS" - - "FABRIC_E_NAME_NOT_EMPTY" - - "FABRIC_E_PROPERTY_CHECK_FAILED" - - "FABRIC_E_SERVICE_METADATA_MISMATCH" - - "FABRIC_E_SERVICE_TYPE_MISMATCH" - - "FABRIC_E_HEALTH_STALE_REPORT" - - "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - - "FABRIC_E_INSTANCE_ID_MISMATCH" - - "FABRIC_E_BACKUP_IN_PROGRESS" - - "FABRIC_E_RESTORE_IN_PROGRESS" - - "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - - Possible values of the error code for HTTP status code 413 (Request - Entity Too Large) - - "FABRIC_E_VALUE_TOO_LARGE" - - Possible values of the error code for HTTP status code 500 (Internal - Server Error) - - "FABRIC_E_NODE_IS_UP" - - "E_FAIL" - - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - - 
"FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - - "FABRIC_E_VOLUME_ALREADY_EXISTS" - - "FABRIC_E_VOLUME_NOT_FOUND" - - "SerializationError" - - Possible values of the error code for HTTP status code 503 (Service - Unavailable) - - "FABRIC_E_NO_WRITE_QUORUM" - - "FABRIC_E_NOT_PRIMARY" - - "FABRIC_E_NOT_READY" - - "FABRIC_E_RECONFIGURATION_PENDING" - - "FABRIC_E_SERVICE_OFFLINE" - - "E_ABORT" - - "FABRIC_E_VALUE_TOO_LARGE" - - Possible values of the error code for HTTP status code 504 (Gateway - Timeout) - - "FABRIC_E_COMMUNICATION_ERROR" - - "FABRIC_E_OPERATION_NOT_COMPLETE" - - "FABRIC_E_TIMEOUT". Possible values include: - 'FABRIC_E_INVALID_PARTITION_KEY', - 'FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR', 'FABRIC_E_INVALID_ADDRESS', - 'FABRIC_E_APPLICATION_NOT_UPGRADING', - 'FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR', - 'FABRIC_E_FABRIC_NOT_UPGRADING', - 'FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR', - 'FABRIC_E_INVALID_CONFIGURATION', 'FABRIC_E_INVALID_NAME_URI', - 'FABRIC_E_PATH_TOO_LONG', 'FABRIC_E_KEY_TOO_LARGE', - 'FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED', - 'FABRIC_E_INVALID_ATOMIC_GROUP', 'FABRIC_E_VALUE_EMPTY', - 'FABRIC_E_NODE_NOT_FOUND', 'FABRIC_E_APPLICATION_TYPE_NOT_FOUND', - 'FABRIC_E_APPLICATION_NOT_FOUND', 'FABRIC_E_SERVICE_TYPE_NOT_FOUND', - 'FABRIC_E_SERVICE_DOES_NOT_EXIST', - 'FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND', - 'FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND', - 'FABRIC_E_PARTITION_NOT_FOUND', 'FABRIC_E_REPLICA_DOES_NOT_EXIST', - 'FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST', - 'FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND', - 'FABRIC_E_DIRECTORY_NOT_FOUND', 'FABRIC_E_FABRIC_VERSION_NOT_FOUND', - 'FABRIC_E_FILE_NOT_FOUND', 'FABRIC_E_NAME_DOES_NOT_EXIST', - 'FABRIC_E_PROPERTY_DOES_NOT_EXIST', 'FABRIC_E_ENUMERATION_COMPLETED', - 'FABRIC_E_SERVICE_MANIFEST_NOT_FOUND', 'FABRIC_E_KEY_NOT_FOUND', - 'FABRIC_E_HEALTH_ENTITY_NOT_FOUND', - 'FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS', - 'FABRIC_E_APPLICATION_ALREADY_EXISTS', - 
'FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION', - 'FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS', - 'FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS', - 'FABRIC_E_SERVICE_ALREADY_EXISTS', - 'FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS', - 'FABRIC_E_APPLICATION_TYPE_IN_USE', - 'FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION', - 'FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS', - 'FABRIC_E_FABRIC_VERSION_IN_USE', 'FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS', - 'FABRIC_E_NAME_ALREADY_EXISTS', 'FABRIC_E_NAME_NOT_EMPTY', - 'FABRIC_E_PROPERTY_CHECK_FAILED', 'FABRIC_E_SERVICE_METADATA_MISMATCH', - 'FABRIC_E_SERVICE_TYPE_MISMATCH', 'FABRIC_E_HEALTH_STALE_REPORT', - 'FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED', - 'FABRIC_E_NODE_HAS_NOT_STOPPED_YET', 'FABRIC_E_INSTANCE_ID_MISMATCH', - 'FABRIC_E_VALUE_TOO_LARGE', 'FABRIC_E_NO_WRITE_QUORUM', - 'FABRIC_E_NOT_PRIMARY', 'FABRIC_E_NOT_READY', - 'FABRIC_E_RECONFIGURATION_PENDING', 'FABRIC_E_SERVICE_OFFLINE', 'E_ABORT', - 'FABRIC_E_COMMUNICATION_ERROR', 'FABRIC_E_OPERATION_NOT_COMPLETE', - 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP', 'E_FAIL', - 'FABRIC_E_BACKUP_IS_ENABLED', - 'FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH', - 'FABRIC_E_INVALID_FOR_STATELESS_SERVICES', 'FABRIC_E_BACKUP_NOT_ENABLED', - 'FABRIC_E_BACKUP_POLICY_NOT_EXISTING', - 'FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING', - 'FABRIC_E_BACKUP_IN_PROGRESS', 'FABRIC_E_RESTORE_IN_PROGRESS', - 'FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING', - 'FABRIC_E_INVALID_SERVICE_SCALING_POLICY', 'E_INVALIDARG', - 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS', - 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND', - 'FABRIC_E_VOLUME_ALREADY_EXISTS', 'FABRIC_E_VOLUME_NOT_FOUND', - 'SerializationError', 'FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR' + :param code: Required. Defines the fabric error codes that be returned as part of the error + object in response to Service Fabric API operations that are not successful. Following are the + error code values that can be returned for a specific HTTP status code. 
+ + + * + Possible values of the error code for HTTP status code 400 (Bad Request) + + + * "FABRIC_E_INVALID_PARTITION_KEY" + * "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + * "FABRIC_E_INVALID_ADDRESS" + * "FABRIC_E_APPLICATION_NOT_UPGRADING" + * "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + * "FABRIC_E_FABRIC_NOT_UPGRADING" + * "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + * "FABRIC_E_INVALID_CONFIGURATION" + * "FABRIC_E_INVALID_NAME_URI" + * "FABRIC_E_PATH_TOO_LONG" + * "FABRIC_E_KEY_TOO_LARGE" + * "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + * "FABRIC_E_INVALID_ATOMIC_GROUP" + * "FABRIC_E_VALUE_EMPTY" + * "FABRIC_E_BACKUP_IS_ENABLED" + * "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + * "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + * "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + * "E_INVALIDARG" + + * + Possible values of the error code for HTTP status code 404 (Not Found) + + + * "FABRIC_E_NODE_NOT_FOUND" + * "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + * "FABRIC_E_APPLICATION_NOT_FOUND" + * "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + * "FABRIC_E_SERVICE_DOES_NOT_EXIST" + * "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + * "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + * "FABRIC_E_PARTITION_NOT_FOUND" + * "FABRIC_E_REPLICA_DOES_NOT_EXIST" + * "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + * "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + * "FABRIC_E_DIRECTORY_NOT_FOUND" + * "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + * "FABRIC_E_FILE_NOT_FOUND" + * "FABRIC_E_NAME_DOES_NOT_EXIST" + * "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + * "FABRIC_E_ENUMERATION_COMPLETED" + * "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + * "FABRIC_E_KEY_NOT_FOUND" + * "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + * "FABRIC_E_BACKUP_NOT_ENABLED" + * "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + * "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + * "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + + * + Possible values of the error code for HTTP status code 409 (Conflict) + + + * "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + * 
"FABRIC_E_APPLICATION_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + * "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + * "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + * "FABRIC_E_SERVICE_ALREADY_EXISTS" + * "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_TYPE_IN_USE" + * "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + * "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + * "FABRIC_E_FABRIC_VERSION_IN_USE" + * "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + * "FABRIC_E_NAME_ALREADY_EXISTS" + * "FABRIC_E_NAME_NOT_EMPTY" + * "FABRIC_E_PROPERTY_CHECK_FAILED" + * "FABRIC_E_SERVICE_METADATA_MISMATCH" + * "FABRIC_E_SERVICE_TYPE_MISMATCH" + * "FABRIC_E_HEALTH_STALE_REPORT" + * "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + * "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + * "FABRIC_E_INSTANCE_ID_MISMATCH" + * "FABRIC_E_BACKUP_IN_PROGRESS" + * "FABRIC_E_RESTORE_IN_PROGRESS" + * "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + + * + Possible values of the error code for HTTP status code 413 (Request Entity Too Large) + + + * "FABRIC_E_VALUE_TOO_LARGE" + + * + Possible values of the error code for HTTP status code 500 (Internal Server Error) + + + * "FABRIC_E_NODE_IS_UP" + * "E_FAIL" + * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + * "FABRIC_E_VOLUME_ALREADY_EXISTS" + * "FABRIC_E_VOLUME_NOT_FOUND" + * "SerializationError" + + * + Possible values of the error code for HTTP status code 503 (Service Unavailable) + + + * "FABRIC_E_NO_WRITE_QUORUM" + * "FABRIC_E_NOT_PRIMARY" + * "FABRIC_E_NOT_READY" + * "FABRIC_E_RECONFIGURATION_PENDING" + * "FABRIC_E_SERVICE_OFFLINE" + * "E_ABORT" + * "FABRIC_E_VALUE_TOO_LARGE" + + * + Possible values of the error code for HTTP status code 504 (Gateway Timeout) + + + * "FABRIC_E_COMMUNICATION_ERROR" + * "FABRIC_E_OPERATION_NOT_COMPLETE" + * "FABRIC_E_TIMEOUT". 
Possible values include: "FABRIC_E_INVALID_PARTITION_KEY", + "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR", "FABRIC_E_INVALID_ADDRESS", + "FABRIC_E_APPLICATION_NOT_UPGRADING", "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR", + "FABRIC_E_FABRIC_NOT_UPGRADING", "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR", + "FABRIC_E_INVALID_CONFIGURATION", "FABRIC_E_INVALID_NAME_URI", "FABRIC_E_PATH_TOO_LONG", + "FABRIC_E_KEY_TOO_LARGE", "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED", + "FABRIC_E_INVALID_ATOMIC_GROUP", "FABRIC_E_VALUE_EMPTY", "FABRIC_E_NODE_NOT_FOUND", + "FABRIC_E_APPLICATION_TYPE_NOT_FOUND", "FABRIC_E_APPLICATION_NOT_FOUND", + "FABRIC_E_SERVICE_TYPE_NOT_FOUND", "FABRIC_E_SERVICE_DOES_NOT_EXIST", + "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND", "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND", + "FABRIC_E_PARTITION_NOT_FOUND", "FABRIC_E_REPLICA_DOES_NOT_EXIST", + "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST", "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND", + "FABRIC_E_DIRECTORY_NOT_FOUND", "FABRIC_E_FABRIC_VERSION_NOT_FOUND", "FABRIC_E_FILE_NOT_FOUND", + "FABRIC_E_NAME_DOES_NOT_EXIST", "FABRIC_E_PROPERTY_DOES_NOT_EXIST", + "FABRIC_E_ENUMERATION_COMPLETED", "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND", + "FABRIC_E_KEY_NOT_FOUND", "FABRIC_E_HEALTH_ENTITY_NOT_FOUND", + "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS", "FABRIC_E_APPLICATION_ALREADY_EXISTS", + "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION", + "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS", "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS", + "FABRIC_E_SERVICE_ALREADY_EXISTS", "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS", + "FABRIC_E_APPLICATION_TYPE_IN_USE", "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION", + "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS", "FABRIC_E_FABRIC_VERSION_IN_USE", + "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS", "FABRIC_E_NAME_ALREADY_EXISTS", + "FABRIC_E_NAME_NOT_EMPTY", "FABRIC_E_PROPERTY_CHECK_FAILED", + "FABRIC_E_SERVICE_METADATA_MISMATCH", "FABRIC_E_SERVICE_TYPE_MISMATCH", + "FABRIC_E_HEALTH_STALE_REPORT", 
"FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED", + "FABRIC_E_NODE_HAS_NOT_STOPPED_YET", "FABRIC_E_INSTANCE_ID_MISMATCH", + "FABRIC_E_VALUE_TOO_LARGE", "FABRIC_E_NO_WRITE_QUORUM", "FABRIC_E_NOT_PRIMARY", + "FABRIC_E_NOT_READY", "FABRIC_E_RECONFIGURATION_PENDING", "FABRIC_E_SERVICE_OFFLINE", + "E_ABORT", "FABRIC_E_COMMUNICATION_ERROR", "FABRIC_E_OPERATION_NOT_COMPLETE", + "FABRIC_E_TIMEOUT", "FABRIC_E_NODE_IS_UP", "E_FAIL", "FABRIC_E_BACKUP_IS_ENABLED", + "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH", "FABRIC_E_INVALID_FOR_STATELESS_SERVICES", + "FABRIC_E_BACKUP_NOT_ENABLED", "FABRIC_E_BACKUP_POLICY_NOT_EXISTING", + "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING", "FABRIC_E_BACKUP_IN_PROGRESS", + "FABRIC_E_RESTORE_IN_PROGRESS", "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING", + "FABRIC_E_INVALID_SERVICE_SCALING_POLICY", "E_INVALIDARG", + "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS", + "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND", "FABRIC_E_VOLUME_ALREADY_EXISTS", + "FABRIC_E_VOLUME_NOT_FOUND", "SerializationError", + "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR". :type code: str or ~azure.servicefabric.models.FabricErrorCodes :param message: Error message. :type message: str @@ -10056,22 +10898,27 @@ class FabricErrorError(Model): 'message': {'key': 'Message', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FabricErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) + self.code = kwargs['code'] self.message = kwargs.get('message', None) -class PropertyBatchInfo(Model): +class PropertyBatchInfo(msrest.serialization.Model): """Information about the results of a property batch. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SuccessfulPropertyBatchInfo, FailedPropertyBatchInfo + sub-classes are: FailedPropertyBatchInfo, SuccessfulPropertyBatchInfo. All required parameters must be populated in order to send to Azure. - :param kind: Required. 
Constant filled by server. - :type kind: str + :param kind: Required. The kind of property batch info, determined by the results of a property + batch. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Successful", "Failed". + :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind """ _validation = { @@ -10083,28 +10930,30 @@ class PropertyBatchInfo(Model): } _subtype_map = { - 'kind': {'Successful': 'SuccessfulPropertyBatchInfo', 'Failed': 'FailedPropertyBatchInfo'} + 'kind': {'Failed': 'FailedPropertyBatchInfo', 'Successful': 'SuccessfulPropertyBatchInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyBatchInfo, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class FailedPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch failing. - Contains information about the specific batch failure. + """Derived from PropertyBatchInfo. Represents the property batch failing. Contains information about the specific batch failure. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param error_message: The error message of the failed operation. Describes - the exception thrown due to the first unsuccessful operation in the - property batch. + :param kind: Required. The kind of property batch info, determined by the results of a property + batch. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Successful", "Failed". + :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind + :param error_message: The error message of the failed operation. Describes the exception thrown + due to the first unsuccessful operation in the property batch. 
:type error_message: str - :param operation_index: The index of the unsuccessful operation in the - property batch. + :param operation_index: The index of the unsuccessful operation in the property batch. :type operation_index: int """ @@ -10118,23 +10967,23 @@ class FailedPropertyBatchInfo(PropertyBatchInfo): 'operation_index': {'key': 'OperationIndex', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FailedPropertyBatchInfo, self).__init__(**kwargs) + self.kind = 'Failed' # type: str self.error_message = kwargs.get('error_message', None) self.operation_index = kwargs.get('operation_index', None) - self.kind = 'Failed' -class FailedUpgradeDomainProgressObject(Model): - """The detailed upgrade progress for nodes in the current upgrade domain at - the point of failure. +class FailedUpgradeDomainProgressObject(msrest.serialization.Model): + """The detailed upgrade progress for nodes in the current upgrade domain at the point of failure. - :param domain_name: The name of the upgrade domain + :param domain_name: The name of the upgrade domain. :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their - statuses - :type node_upgrade_progress_list: - list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
+ :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -10142,22 +10991,22 @@ class FailedUpgradeDomainProgressObject(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FailedUpgradeDomainProgressObject, self).__init__(**kwargs) self.domain_name = kwargs.get('domain_name', None) self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) -class FailureUpgradeDomainProgressInfo(Model): - """Information about the upgrade domain progress at the time of upgrade - failure. +class FailureUpgradeDomainProgressInfo(msrest.serialization.Model): + """Information about the upgrade domain progress at the time of upgrade failure. - :param domain_name: The name of the upgrade domain + :param domain_name: The name of the upgrade domain. :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their - statuses - :type node_upgrade_progress_list: - list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their statuses. + :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -10165,24 +11014,25 @@ class FailureUpgradeDomainProgressInfo(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FailureUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = kwargs.get('domain_name', None) self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) -class FileInfo(Model): +class FileInfo(msrest.serialization.Model): """Information about a image store file. :param file_size: The size of file in bytes. 
:type file_size: str :param file_version: Information about the version of image store file. :type file_version: ~azure.servicefabric.models.FileVersion - :param modified_date: The date and time when the image store file was last - modified. - :type modified_date: datetime - :param store_relative_path: The file path relative to the image store root - path. + :param modified_date: The date and time when the image store file was last modified. + :type modified_date: ~datetime.datetime + :param store_relative_path: The file path relative to the image store root path. :type store_relative_path: str """ @@ -10193,7 +11043,10 @@ class FileInfo(Model): 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FileInfo, self).__init__(**kwargs) self.file_size = kwargs.get('file_size', None) self.file_version = kwargs.get('file_version', None) @@ -10202,17 +11055,17 @@ def __init__(self, **kwargs): class FileShareBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for file share storage used for storing or - enumerating backups. + """Describes the parameters for file share storage used for storing or enumerating backups. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str - :param path: Required. UNC path of the file share where to store or - enumerate backups from. + :param path: Required. UNC path of the file share where to store or enumerate backups from. 
:type path: str :param primary_user_name: Primary user name to access the file share. :type primary_user_name: str @@ -10220,7 +11073,7 @@ class FileShareBackupStorageDescription(BackupStorageDescription): :type primary_password: str :param secondary_user_name: Secondary user name to access the file share. :type secondary_user_name: str - :param secondary_password: Secondary password to access the share location + :param secondary_password: Secondary password to access the share location. :type secondary_password: str """ @@ -10230,8 +11083,8 @@ class FileShareBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'path': {'key': 'Path', 'type': 'str'}, 'primary_user_name': {'key': 'PrimaryUserName', 'type': 'str'}, 'primary_password': {'key': 'PrimaryPassword', 'type': 'str'}, @@ -10239,27 +11092,30 @@ class FileShareBackupStorageDescription(BackupStorageDescription): 'secondary_password': {'key': 'SecondaryPassword', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FileShareBackupStorageDescription, self).__init__(**kwargs) - self.path = kwargs.get('path', None) + self.storage_kind = 'FileShare' # type: str + self.path = kwargs['path'] self.primary_user_name = kwargs.get('primary_user_name', None) self.primary_password = kwargs.get('primary_password', None) self.secondary_user_name = kwargs.get('secondary_user_name', None) self.secondary_password = kwargs.get('secondary_password', None) - self.storage_kind = 'FileShare' -class FileVersion(Model): +class FileVersion(msrest.serialization.Model): """Information about the version of image store file. - :param version_number: The current image store version number for the file - is used in image store for checking whether it need to be updated. 
+ :param version_number: The current image store version number for the file is used in image + store for checking whether it need to be updated. :type version_number: str - :param epoch_data_loss_number: The epoch data loss number of image store - replica when this file entry was updated or created. + :param epoch_data_loss_number: The epoch data loss number of image store replica when this file + entry was updated or created. :type epoch_data_loss_number: str - :param epoch_configuration_number: The epoch configuration version number - of the image store replica when this file entry was created or updated. + :param epoch_configuration_number: The epoch configuration version number of the image store + replica when this file entry was created or updated. :type epoch_configuration_number: str """ @@ -10269,19 +11125,21 @@ class FileVersion(Model): 'epoch_configuration_number': {'key': 'EpochConfigurationNumber', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FileVersion, self).__init__(**kwargs) self.version_number = kwargs.get('version_number', None) self.epoch_data_loss_number = kwargs.get('epoch_data_loss_number', None) self.epoch_configuration_number = kwargs.get('epoch_configuration_number', None) -class FolderInfo(Model): - """Information about a image store folder. It includes how many files this - folder contains and its image store relative path. +class FolderInfo(msrest.serialization.Model): + """Information about a image store folder. It includes how many files this folder contains and its image store relative path. - :param store_relative_path: The remote location within image store. This - path is relative to the image store root. + :param store_relative_path: The remote location within image store. This path is relative to + the image store root. :type store_relative_path: str :param file_count: The number of files from within the image store folder. 
:type file_count: str @@ -10292,17 +11150,20 @@ class FolderInfo(Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FolderInfo, self).__init__(**kwargs) self.store_relative_path = kwargs.get('store_relative_path', None) self.file_count = kwargs.get('file_count', None) -class FolderSizeInfo(Model): +class FolderSizeInfo(msrest.serialization.Model): """Information of a image store folder size. - :param store_relative_path: The remote location within image store. This - path is relative to the image store root. + :param store_relative_path: The remote location within image store. This path is relative to + the image store root. :type store_relative_path: str :param folder_size: The size of folder in bytes. :type folder_size: str @@ -10313,7 +11174,10 @@ class FolderSizeInfo(Model): 'folder_size': {'key': 'FolderSize', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FolderSizeInfo, self).__init__(**kwargs) self.store_relative_path = kwargs.get('store_relative_path', None) self.folder_size = kwargs.get('folder_size', None) @@ -10324,12 +11188,14 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. Constant filled by server. - :type schedule_kind: str - :param interval: Required. Defines the interval with which backups are - periodically taken. It should be specified in ISO8601 format. Timespan in - seconds is not supported and will be ignored while creating the policy. - :type interval: timedelta + :param schedule_kind: Required. The kind of backup schedule, time based or frequency + based.Constant filled by server. Possible values include: "Invalid", "TimeBased", + "FrequencyBased". + :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind + :param interval: Required. 
Defines the interval with which backups are periodically taken. It + should be specified in ISO8601 format. Timespan in seconds is not supported and will be ignored + while creating the policy. + :type interval: ~datetime.timedelta """ _validation = { @@ -10342,19 +11208,21 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): 'interval': {'key': 'Interval', 'type': 'duration'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(FrequencyBasedBackupScheduleDescription, self).__init__(**kwargs) - self.interval = kwargs.get('interval', None) - self.schedule_kind = 'FrequencyBased' + self.schedule_kind = 'FrequencyBased' # type: str + self.interval = kwargs['interval'] -class GatewayDestination(Model): +class GatewayDestination(msrest.serialization.Model): """Describes destination endpoint for routing traffic. All required parameters must be populated in order to send to Azure. - :param application_name: Required. Name of the service fabric Mesh - application. + :param application_name: Required. Name of the service fabric Mesh application. :type application_name: str :param service_name: Required. service that contains the endpoint. :type service_name: str @@ -10374,18 +11242,20 @@ class GatewayDestination(Model): 'endpoint_name': {'key': 'endpointName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(GatewayDestination, self).__init__(**kwargs) - self.application_name = kwargs.get('application_name', None) - self.service_name = kwargs.get('service_name', None) - self.endpoint_name = kwargs.get('endpoint_name', None) + self.application_name = kwargs['application_name'] + self.service_name = kwargs['service_name'] + self.endpoint_name = kwargs['endpoint_name'] -class GatewayResourceDescription(Model): +class GatewayResourceDescription(msrest.serialization.Model): """This type describes a gateway resource. 
- Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. @@ -10393,24 +11263,21 @@ class GatewayResourceDescription(Model): :type name: str :param description: User readable description of the gateway. :type description: str - :param source_network: Required. Network the gateway should listen on for - requests. + :param source_network: Required. Network the gateway should listen on for requests. :type source_network: ~azure.servicefabric.models.NetworkRef - :param destination_network: Required. Network that the Application is - using. + :param destination_network: Required. Network that the Application is using. :type destination_network: ~azure.servicefabric.models.NetworkRef :param tcp: Configuration for tcp connectivity for this gateway. :type tcp: list[~azure.servicefabric.models.TcpConfig] :param http: Configuration for http connectivity for this gateway. :type http: list[~azure.servicefabric.models.HttpConfig] - :ivar status: Status of the resource. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the gateway. + :ivar status_details: Gives additional information about the current status of the gateway. :vartype status_details: str - :ivar ip_address: IP address of the gateway. This is populated in the - response and is ignored for incoming requests. + :ivar ip_address: IP address of the gateway. This is populated in the response and is ignored + for incoming requests. 
:vartype ip_address: str """ @@ -10435,12 +11302,15 @@ class GatewayResourceDescription(Model): 'ip_address': {'key': 'properties.ipAddress', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(GatewayResourceDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.description = kwargs.get('description', None) - self.source_network = kwargs.get('source_network', None) - self.destination_network = kwargs.get('destination_network', None) + self.source_network = kwargs['source_network'] + self.destination_network = kwargs['destination_network'] self.tcp = kwargs.get('tcp', None) self.http = kwargs.get('http', None) self.status = None @@ -10448,33 +11318,27 @@ def __init__(self, **kwargs): self.ip_address = None -class GetBackupByStorageQueryDescription(Model): - """Describes additional filters to be applied, while listing backups, and - backup storage details from where to fetch the backups. +class GetBackupByStorageQueryDescription(msrest.serialization.Model): + """Describes additional filters to be applied, while listing backups, and backup storage details from where to fetch the backups. All required parameters must be populated in order to send to Azure. - :param start_date_time_filter: Specifies the start date time in ISO8601 - from which to enumerate backups. If not specified, backups are enumerated - from the beginning. - :type start_date_time_filter: datetime - :param end_date_time_filter: Specifies the end date time in ISO8601 till - which to enumerate backups. If not specified, backups are enumerated till - the end. - :type end_date_time_filter: datetime - :param latest: If specified as true, gets the most recent backup (within - the specified time range) for every partition under the specified backup - entity. Default value: False . + :param start_date_time_filter: Specifies the start date time in ISO8601 from which to enumerate + backups. 
If not specified, backups are enumerated from the beginning. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specifies the end date time in ISO8601 till which to enumerate + backups. If not specified, backups are enumerated till the end. + :type end_date_time_filter: ~datetime.datetime + :param latest: If specified as true, gets the most recent backup (within the specified time + range) for every partition under the specified backup entity. :type latest: bool - :param storage: Required. Describes the parameters for the backup storage - from where to enumerate backups. This is optional and by default backups - are enumerated from the backup storage where this backup entity is - currently being backed up (as specified in backup policy). This parameter - is useful to be able to enumerate backups from another cluster where you - may intend to restore. + :param storage: Required. Describes the parameters for the backup storage from where to + enumerate backups. This is optional and by default backups are enumerated from the backup + storage where this backup entity is currently being backed up (as specified in backup policy). + This parameter is useful to be able to enumerate backups from another cluster where you may + intend to restore. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param backup_entity: Required. Indicates the entity for which to - enumerate backups. + :param backup_entity: Required. Indicates the entity for which to enumerate backups. 
:type backup_entity: ~azure.servicefabric.models.BackupEntity """ @@ -10491,49 +11355,54 @@ class GetBackupByStorageQueryDescription(Model): 'backup_entity': {'key': 'BackupEntity', 'type': 'BackupEntity'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(GetBackupByStorageQueryDescription, self).__init__(**kwargs) self.start_date_time_filter = kwargs.get('start_date_time_filter', None) self.end_date_time_filter = kwargs.get('end_date_time_filter', None) self.latest = kwargs.get('latest', False) - self.storage = kwargs.get('storage', None) - self.backup_entity = kwargs.get('backup_entity', None) + self.storage = kwargs['storage'] + self.backup_entity = kwargs['backup_entity'] class GetPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that gets the specified property if it - exists. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that gets the specified property if it exists. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str - :param include_value: Whether or not to return the property value with the - metadata. 
- True if values should be returned with the metadata; False to return only - property metadata. Default value: False . + :param include_value: Whether or not to return the property value with the metadata. + True if values should be returned with the metadata; False to return only property metadata. :type include_value: bool """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'include_value': {'key': 'IncludeValue', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(GetPropertyBatchOperation, self).__init__(**kwargs) + self.kind = 'Get' # type: str self.include_value = kwargs.get('include_value', False) - self.kind = 'Get' class GuidPropertyValue(PropertyValue): @@ -10541,8 +11410,10 @@ class GuidPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. 
:type data: str """ @@ -10557,18 +11428,20 @@ class GuidPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(GuidPropertyValue, self).__init__(**kwargs) - self.data = kwargs.get('data', None) - self.kind = 'Guid' + self.kind = 'Guid' # type: str + self.data = kwargs['data'] -class HealthEvaluationWrapper(Model): +class HealthEvaluationWrapper(msrest.serialization.Model): """Wrapper object for health evaluation. - :param health_evaluation: Represents a health evaluation which describes - the data and the algorithm used by health manager to evaluate the health - of an entity. + :param health_evaluation: Represents a health evaluation which describes the data and the + algorithm used by health manager to evaluate the health of an entity. :type health_evaluation: ~azure.servicefabric.models.HealthEvaluation """ @@ -10576,86 +11449,79 @@ class HealthEvaluationWrapper(Model): 'health_evaluation': {'key': 'HealthEvaluation', 'type': 'HealthEvaluation'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HealthEvaluationWrapper, self).__init__(**kwargs) self.health_evaluation = kwargs.get('health_evaluation', None) -class HealthInformation(Model): - """Represents common health report information. It is included in all health - reports sent to health store and in all health events returned by health - queries. +class HealthInformation(msrest.serialization.Model): + """Represents common health report information. It is included in all health reports sent to health store and in all health events returned by health queries. All required parameters must be populated in order to send to Azure. - :param source_id: Required. The source name that identifies the - client/watchdog/system component that generated the health information. + :param source_id: Required. 
The source name that identifies the client/watchdog/system + component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An - entity can have health reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter - flexibility to categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the - state of the available disk on a node, + :param property: Required. The property of the health information. An entity can have health + reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available + disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a - property "Connectivity" on the same node. - In the health store, these reports are treated as separate health events - for the specified node. - Together with the SourceId, the property uniquely identifies the health - information. + The same reporter can monitor the node connectivity, so it can report a property + "Connectivity" on the same node. + In the health store, these reports are treated as separate health events for the specified + node. + + Together with the SourceId, the property uniquely identifies the health information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity - such as Cluster, Node, Application, Service, Partition, Replica etc. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: Required. The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health - report is valid. This field uses ISO8601 format for specifying the - duration. - When clients report periodically, they should send reports with higher - frequency than time to live. - If clients report on transition, they can set the time to live to - infinite. - When time to live expires, the health event that contains the health - information - is either removed from health store, if RemoveWhenExpired is true, or - evaluated at error, if RemoveWhenExpired false. + :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This + field uses ISO8601 format for specifying the duration. + When clients report periodically, they should send reports with higher frequency than time to + live. + If clients report on transition, they can set the time to live to infinite. + When time to live expires, the health event that contains the health information + is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if + RemoveWhenExpired false. + If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: timedelta - :param description: The description of the health information. It - represents free text used to add human readable information about the - report. + :type time_to_live_in_milli_seconds: ~datetime.timedelta + :param description: The description of the health information. It represents free text used to + add human readable information about the report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker - "[Truncated]", and total string size is 4096 characters. 
+ When truncated, the last characters of the description contain a marker "[Truncated]", and + total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters - from the original string. + Note that when truncated, the description has less than 4096 characters from the original + string. :type description: str - :param sequence_number: The sequence number for this health report as a - numeric string. - The report sequence number is used by the health store to detect stale - reports. - If not specified, a sequence number is auto-generated by the health client - when a report is added. + :param sequence_number: The sequence number for this health report as a numeric string. + The report sequence number is used by the health store to detect stale reports. + If not specified, a sequence number is auto-generated by the health client when a report is + added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is - removed from health store when it expires. - If set to true, the report is removed from the health store after it - expires. - If set to false, the report is treated as an error when expired. The value - of this property is false by default. - When clients report periodically, they should set RemoveWhenExpired false - (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the - entity is evaluated at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is removed from health + store when it expires. + If set to true, the report is removed from the health store after it expires. + If set to false, the report is treated as an error when expired. The value of this property is + false by default. + When clients report periodically, they should set RemoveWhenExpired false (default). 
+ This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated + at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health - report and can be used to find more detailed information about a specific - health event at - aka.ms/sfhealthid + :param health_report_id: A health report ID which identifies the health report and can be used + to find more detailed information about a specific health event at + aka.ms/sfhealthid. :type health_report_id: str """ @@ -10676,11 +11542,14 @@ class HealthInformation(Model): 'health_report_id': {'key': 'HealthReportId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HealthInformation, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] self.time_to_live_in_milli_seconds = kwargs.get('time_to_live_in_milli_seconds', None) self.description = kwargs.get('description', None) self.sequence_number = kwargs.get('sequence_number', None) @@ -10689,121 +11558,108 @@ def __init__(self, **kwargs): class HealthEvent(HealthInformation): - """Represents health information reported on a health entity, such as cluster, - application or node, with additional metadata added by the Health Manager. + """Represents health information reported on a health entity, such as cluster, application or node, with additional metadata added by the Health Manager. All required parameters must be populated in order to send to Azure. - :param source_id: Required. The source name that identifies the - client/watchdog/system component that generated the health information. + :param source_id: Required. 
The source name that identifies the client/watchdog/system + component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An - entity can have health reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter - flexibility to categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the - state of the available disk on a node, + :param property: Required. The property of the health information. An entity can have health + reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available + disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a - property "Connectivity" on the same node. - In the health store, these reports are treated as separate health events - for the specified node. - Together with the SourceId, the property uniquely identifies the health - information. + The same reporter can monitor the node connectivity, so it can report a property + "Connectivity" on the same node. + In the health store, these reports are treated as separate health events for the specified + node. + + Together with the SourceId, the property uniquely identifies the health information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity - such as Cluster, Node, Application, Service, Partition, Replica etc. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: Required. The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health - report is valid. This field uses ISO8601 format for specifying the - duration. - When clients report periodically, they should send reports with higher - frequency than time to live. - If clients report on transition, they can set the time to live to - infinite. - When time to live expires, the health event that contains the health - information - is either removed from health store, if RemoveWhenExpired is true, or - evaluated at error, if RemoveWhenExpired false. + :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This + field uses ISO8601 format for specifying the duration. + When clients report periodically, they should send reports with higher frequency than time to + live. + If clients report on transition, they can set the time to live to infinite. + When time to live expires, the health event that contains the health information + is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if + RemoveWhenExpired false. + If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: timedelta - :param description: The description of the health information. It - represents free text used to add human readable information about the - report. + :type time_to_live_in_milli_seconds: ~datetime.timedelta + :param description: The description of the health information. It represents free text used to + add human readable information about the report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker - "[Truncated]", and total string size is 4096 characters. 
+ When truncated, the last characters of the description contain a marker "[Truncated]", and + total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters - from the original string. + Note that when truncated, the description has less than 4096 characters from the original + string. :type description: str - :param sequence_number: The sequence number for this health report as a - numeric string. - The report sequence number is used by the health store to detect stale - reports. - If not specified, a sequence number is auto-generated by the health client - when a report is added. + :param sequence_number: The sequence number for this health report as a numeric string. + The report sequence number is used by the health store to detect stale reports. + If not specified, a sequence number is auto-generated by the health client when a report is + added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is - removed from health store when it expires. - If set to true, the report is removed from the health store after it - expires. - If set to false, the report is treated as an error when expired. The value - of this property is false by default. - When clients report periodically, they should set RemoveWhenExpired false - (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the - entity is evaluated at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is removed from health + store when it expires. + If set to true, the report is removed from the health store after it expires. + If set to false, the report is treated as an error when expired. The value of this property is + false by default. + When clients report periodically, they should set RemoveWhenExpired false (default). 
+ This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated + at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health - report and can be used to find more detailed information about a specific - health event at - aka.ms/sfhealthid + :param health_report_id: A health report ID which identifies the health report and can be used + to find more detailed information about a specific health event at + aka.ms/sfhealthid. :type health_report_id: str - :param is_expired: Returns true if the health event is expired, otherwise - false. + :param is_expired: Returns true if the health event is expired, otherwise false. :type is_expired: bool - :param source_utc_timestamp: The date and time when the health report was - sent by the source. - :type source_utc_timestamp: datetime - :param last_modified_utc_timestamp: The date and time when the health - report was last modified by the health store. - :type last_modified_utc_timestamp: datetime - :param last_ok_transition_at: If the current health state is 'Ok', this - property returns the time at which the health report was first reported - with 'Ok'. - For periodic reporting, many reports with the same state may have been - generated. - This property returns the date and time when the first 'Ok' health report - was received. - If the current health state is 'Error' or 'Warning', returns the date and - time at which the health state was last in 'Ok', before transitioning to a - different state. + :param source_utc_timestamp: The date and time when the health report was sent by the source. + :type source_utc_timestamp: ~datetime.datetime + :param last_modified_utc_timestamp: The date and time when the health report was last modified + by the health store. 
+ :type last_modified_utc_timestamp: ~datetime.datetime + :param last_ok_transition_at: If the current health state is 'Ok', this property returns the + time at which the health report was first reported with 'Ok'. + For periodic reporting, many reports with the same state may have been generated. + This property returns the date and time when the first 'Ok' health report was received. + + If the current health state is 'Error' or 'Warning', returns the date and time at which the + health state was last in 'Ok', before transitioning to a different state. + If the health state was never 'Ok', the value will be zero date-time. - :type last_ok_transition_at: datetime - :param last_warning_transition_at: If the current health state is - 'Warning', this property returns the time at which the health report was - first reported with 'Warning'. For periodic reporting, many reports with - the same state may have been generated however, this property returns only - the date and time at the first 'Warning' health report was received. - If the current health state is 'Ok' or 'Error', returns the date and time - at which the health state was last in 'Warning', before transitioning to a - different state. + :type last_ok_transition_at: ~datetime.datetime + :param last_warning_transition_at: If the current health state is 'Warning', this property + returns the time at which the health report was first reported with 'Warning'. For periodic + reporting, many reports with the same state may have been generated however, this property + returns only the date and time at the first 'Warning' health report was received. + + If the current health state is 'Ok' or 'Error', returns the date and time at which the health + state was last in 'Warning', before transitioning to a different state. + If the health state was never 'Warning', the value will be zero date-time. 
- :type last_warning_transition_at: datetime - :param last_error_transition_at: If the current health state is 'Error', - this property returns the time at which the health report was first - reported with 'Error'. For periodic reporting, many reports with the same - state may have been generated however, this property returns only the date - and time at the first 'Error' health report was received. - If the current health state is 'Ok' or 'Warning', returns the date and - time at which the health state was last in 'Error', before transitioning - to a different state. + :type last_warning_transition_at: ~datetime.datetime + :param last_error_transition_at: If the current health state is 'Error', this property returns + the time at which the health report was first reported with 'Error'. For periodic reporting, + many reports with the same state may have been generated however, this property returns only + the date and time at the first 'Error' health report was received. + + If the current health state is 'Ok' or 'Warning', returns the date and time at which the + health state was last in 'Error', before transitioning to a different state. + If the health state was never 'Error', the value will be zero date-time. - :type last_error_transition_at: datetime + :type last_error_transition_at: ~datetime.datetime """ _validation = { @@ -10829,7 +11685,10 @@ class HealthEvent(HealthInformation): 'last_error_transition_at': {'key': 'LastErrorTransitionAt', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HealthEvent, self).__init__(**kwargs) self.is_expired = kwargs.get('is_expired', None) self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) @@ -10839,18 +11698,14 @@ def __init__(self, **kwargs): self.last_error_transition_at = kwargs.get('last_error_transition_at', None) -class HealthStateCount(Model): - """Represents information about how many health entities are in Ok, Warning - and Error health state. 
+class HealthStateCount(msrest.serialization.Model): + """Represents information about how many health entities are in Ok, Warning and Error health state. - :param ok_count: The number of health entities with aggregated health - state Ok. + :param ok_count: The number of health entities with aggregated health state Ok. :type ok_count: long - :param warning_count: The number of health entities with aggregated health - state Warning. + :param warning_count: The number of health entities with aggregated health state Warning. :type warning_count: long - :param error_count: The number of health entities with aggregated health - state Error. + :param error_count: The number of health entities with aggregated health state Error. :type error_count: long """ @@ -10866,49 +11721,48 @@ class HealthStateCount(Model): 'error_count': {'key': 'ErrorCount', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HealthStateCount, self).__init__(**kwargs) self.ok_count = kwargs.get('ok_count', None) self.warning_count = kwargs.get('warning_count', None) self.error_count = kwargs.get('error_count', None) -class HealthStatistics(Model): - """The health statistics of an entity, returned as part of the health query - result when the query description is configured to include statistics. - The statistics include health state counts for all children types of the - current entity. - For example, for cluster, the health statistics include health state counts - for nodes, applications, services, partitions, replicas, deployed - applications and deployed service packages. - For partition, the health statistics include health counts for replicas. +class HealthStatistics(msrest.serialization.Model): + """The health statistics of an entity, returned as part of the health query result when the query description is configured to include statistics. +The statistics include health state counts for all children types of the current entity. 
+For example, for cluster, the health statistics include health state counts for nodes, applications, services, partitions, replicas, deployed applications and deployed service packages. +For partition, the health statistics include health counts for replicas. - :param health_state_count_list: List of health state counts per entity - kind, which keeps track of how many children of the queried entity are in - Ok, Warning and Error state. - :type health_state_count_list: - list[~azure.servicefabric.models.EntityKindHealthStateCount] + :param health_state_count_list: List of health state counts per entity kind, which keeps track + of how many children of the queried entity are in Ok, Warning and Error state. + :type health_state_count_list: list[~azure.servicefabric.models.EntityKindHealthStateCount] """ _attribute_map = { 'health_state_count_list': {'key': 'HealthStateCountList', 'type': '[EntityKindHealthStateCount]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HealthStatistics, self).__init__(**kwargs) self.health_state_count_list = kwargs.get('health_state_count_list', None) -class HttpConfig(Model): - """Describes the http configuration for external connectivity for this - network. +class HttpConfig(msrest.serialization.Model): + """Describes the http configuration for external connectivity for this network. All required parameters must be populated in order to send to Azure. :param name: Required. http gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint - below needs to be exposed. + :param port: Required. Specifies the port at which the service endpoint below needs to be + exposed. :type port: int :param hosts: Required. description for routing. 
:type hosts: list[~azure.servicefabric.models.HttpHostConfig] @@ -10926,23 +11780,26 @@ class HttpConfig(Model): 'hosts': {'key': 'hosts', 'type': '[HttpHostConfig]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HttpConfig, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.port = kwargs.get('port', None) - self.hosts = kwargs.get('hosts', None) + self.name = kwargs['name'] + self.port = kwargs['port'] + self.hosts = kwargs['hosts'] -class HttpHostConfig(Model): +class HttpHostConfig(msrest.serialization.Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. :param name: Required. http hostname config name. :type name: str - :param routes: Required. Route information to use for routing. Routes are - processed in the order they are specified. Specify routes that are more - specific before routes that can handle general cases. + :param routes: Required. Route information to use for routing. Routes are processed in the + order they are specified. Specify routes that are more specific before routes that can handle + general cases. :type routes: list[~azure.servicefabric.models.HttpRouteConfig] """ @@ -10956,13 +11813,16 @@ class HttpHostConfig(Model): 'routes': {'key': 'routes', 'type': '[HttpRouteConfig]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HttpHostConfig, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.routes = kwargs.get('routes', None) + self.name = kwargs['name'] + self.routes = kwargs['routes'] -class HttpRouteConfig(Model): +class HttpRouteConfig(msrest.serialization.Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. @@ -10971,8 +11831,7 @@ class HttpRouteConfig(Model): :type name: str :param match: Required. Describes a rule for http route matching. 
:type match: ~azure.servicefabric.models.HttpRouteMatchRule - :param destination: Required. Describes destination endpoint for routing - traffic. + :param destination: Required. Describes destination endpoint for routing traffic. :type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -10988,14 +11847,17 @@ class HttpRouteConfig(Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HttpRouteConfig, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.match = kwargs.get('match', None) - self.destination = kwargs.get('destination', None) + self.name = kwargs['name'] + self.match = kwargs['match'] + self.destination = kwargs['destination'] -class HttpRouteMatchHeader(Model): +class HttpRouteMatchHeader(msrest.serialization.Model): """Describes header information for http route matching. All required parameters must be populated in order to send to Azure. @@ -11004,7 +11866,7 @@ class HttpRouteMatchHeader(Model): :type name: str :param value: Value of header to match in request. :type value: str - :param type: how to match header value. Possible values include: 'exact' + :param type: how to match header value. Possible values include: "exact". :type type: str or ~azure.servicefabric.models.HeaderMatchType """ @@ -11018,33 +11880,32 @@ class HttpRouteMatchHeader(Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HttpRouteMatchHeader, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.value = kwargs.get('value', None) self.type = kwargs.get('type', None) -class HttpRouteMatchPath(Model): +class HttpRouteMatchPath(msrest.serialization.Model): """Path to match for routing. - Variables are only populated by the server, and will be ignored when - sending a request. 
- All required parameters must be populated in order to send to Azure. :param value: Required. Uri path to match for request. :type value: str :param rewrite: replacement string for matched part of the Uri. :type rewrite: str - :ivar type: Required. how to match value in the Uri. Default value: - "prefix" . - :vartype type: str + :param type: Required. how to match value in the Uri. Possible values include: "prefix". + :type type: str or ~azure.servicefabric.models.PathMatchType """ _validation = { 'value': {'required': True}, - 'type': {'required': True, 'constant': True}, + 'type': {'required': True}, } _attribute_map = { @@ -11053,15 +11914,17 @@ class HttpRouteMatchPath(Model): 'type': {'key': 'type', 'type': 'str'}, } - type = "prefix" - - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HttpRouteMatchPath, self).__init__(**kwargs) - self.value = kwargs.get('value', None) + self.value = kwargs['value'] self.rewrite = kwargs.get('rewrite', None) + self.type = kwargs['type'] -class HttpRouteMatchRule(Model): +class HttpRouteMatchRule(msrest.serialization.Model): """Describes a rule for http route matching. All required parameters must be populated in order to send to Azure. @@ -11081,32 +11944,32 @@ class HttpRouteMatchRule(Model): 'headers': {'key': 'headers', 'type': '[HttpRouteMatchHeader]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(HttpRouteMatchRule, self).__init__(**kwargs) - self.path = kwargs.get('path', None) + self.path = kwargs['path'] self.headers = kwargs.get('headers', None) -class IdentityDescription(Model): +class IdentityDescription(msrest.serialization.Model): """Information describing the identities associated with this application. All required parameters must be populated in order to send to Azure. 
- :param token_service_endpoint: the endpoint for the token service managing - this identity + :param token_service_endpoint: the endpoint for the token service managing this identity. :type token_service_endpoint: str - :param type: Required. the types of identities associated with this - resource; currently restricted to 'SystemAssigned and UserAssigned' + :param type: Required. the types of identities associated with this resource; currently + restricted to 'SystemAssigned and UserAssigned'. :type type: str - :param tenant_id: the identifier of the tenant containing the - application's identity. + :param tenant_id: the identifier of the tenant containing the application's identity. :type tenant_id: str - :param principal_id: the object identifier of the Service Principal of the - identity associated with this resource. + :param principal_id: the object identifier of the Service Principal of the identity associated + with this resource. :type principal_id: str :param user_assigned_identities: represents user assigned identities map. 
- :type user_assigned_identities: dict[str, - ~azure.servicefabric.models.IdentityItemDescription] + :type user_assigned_identities: dict[str, ~azure.servicefabric.models.IdentityItemDescription] """ _validation = { @@ -11121,23 +11984,26 @@ class IdentityDescription(Model): 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{IdentityItemDescription}'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(IdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = kwargs.get('token_service_endpoint', None) - self.type = kwargs.get('type', None) + self.type = kwargs['type'] self.tenant_id = kwargs.get('tenant_id', None) self.principal_id = kwargs.get('principal_id', None) self.user_assigned_identities = kwargs.get('user_assigned_identities', None) -class IdentityItemDescription(Model): +class IdentityItemDescription(msrest.serialization.Model): """Describes a single user-assigned identity associated with the application. - :param principal_id: the object identifier of the Service Principal which - this identity represents. + :param principal_id: the object identifier of the Service Principal which this identity + represents. :type principal_id: str - :param client_id: the client identifier of the Service Principal which - this identity represents. + :param client_id: the client identifier of the Service Principal which this identity + represents. :type client_id: str """ @@ -11146,30 +12012,32 @@ class IdentityItemDescription(Model): 'client_id': {'key': 'clientId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(IdentityItemDescription, self).__init__(**kwargs) self.principal_id = kwargs.get('principal_id', None) self.client_id = kwargs.get('client_id', None) -class ImageRegistryCredential(Model): +class ImageRegistryCredential(msrest.serialization.Model): """Image registry credential. All required parameters must be populated in order to send to Azure. 
- :param server: Required. Docker image registry server, without protocol - such as `http` and `https`. + :param server: Required. Docker image registry server, without protocol such as ``http`` and + ``https``. :type server: str :param username: Required. The username for the private registry. :type username: str - :param password_type: The type of the image registry password being given - in password. Possible values include: 'ClearText', 'KeyVaultReference', - 'SecretValueReference'. Default value: "ClearText" . - :type password_type: str or - ~azure.servicefabric.models.ImageRegistryPasswordType - :param password: The password for the private registry. The password is - required for create or update operations, however it is not returned in - the get or list operations. Will be processed based on the type provided. + :param password_type: The type of the image registry password being given in password. Possible + values include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: + "ClearText". + :type password_type: str or ~azure.servicefabric.models.ImageRegistryPasswordType + :param password: The password for the private registry. The password is required for create or + update operations, however it is not returned in the get or list operations. Will be processed + based on the type provided. 
:type password: str """ @@ -11185,22 +12053,25 @@ class ImageRegistryCredential(Model): 'password': {'key': 'password', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ImageRegistryCredential, self).__init__(**kwargs) - self.server = kwargs.get('server', None) - self.username = kwargs.get('username', None) + self.server = kwargs['server'] + self.username = kwargs['username'] self.password_type = kwargs.get('password_type', "ClearText") self.password = kwargs.get('password', None) -class ImageStoreContent(Model): +class ImageStoreContent(msrest.serialization.Model): """Information about the image store content. - :param store_files: The list of image store file info objects represents - files found under the given image store relative path. + :param store_files: The list of image store file info objects represents files found under the + given image store relative path. :type store_files: list[~azure.servicefabric.models.FileInfo] - :param store_folders: The list of image store folder info objects - represents subfolders found under the given image store relative path. + :param store_folders: The list of image store folder info objects represents subfolders found + under the given image store relative path. :type store_folders: list[~azure.servicefabric.models.FolderInfo] """ @@ -11209,31 +12080,32 @@ class ImageStoreContent(Model): 'store_folders': {'key': 'StoreFolders', 'type': '[FolderInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ImageStoreContent, self).__init__(**kwargs) self.store_files = kwargs.get('store_files', None) self.store_folders = kwargs.get('store_folders', None) -class ImageStoreCopyDescription(Model): - """Information about how to copy image store content from one image store - relative path to another image store relative path. 
+class ImageStoreCopyDescription(msrest.serialization.Model): + """Information about how to copy image store content from one image store relative path to another image store relative path. All required parameters must be populated in order to send to Azure. - :param remote_source: Required. The relative path of source image store - content to be copied from. + :param remote_source: Required. The relative path of source image store content to be copied + from. :type remote_source: str - :param remote_destination: Required. The relative path of destination - image store content to be copied to. + :param remote_destination: Required. The relative path of destination image store content to be + copied to. :type remote_destination: str :param skip_files: The list of the file names to be skipped for copying. :type skip_files: list[str] - :param check_mark_file: Indicates whether to check mark file during - copying. The property is true if checking mark file is required, false - otherwise. The mark file is used to check whether the folder is well - constructed. If the property is true and mark file does not exist, the - copy is skipped. + :param check_mark_file: Indicates whether to check mark file during copying. The property is + true if checking mark file is required, false otherwise. The mark file is used to check whether + the folder is well constructed. If the property is true and mark file does not exist, the copy + is skipped. 
:type check_mark_file: bool """ @@ -11249,35 +12121,38 @@ class ImageStoreCopyDescription(Model): 'check_mark_file': {'key': 'CheckMarkFile', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ImageStoreCopyDescription, self).__init__(**kwargs) - self.remote_source = kwargs.get('remote_source', None) - self.remote_destination = kwargs.get('remote_destination', None) + self.remote_source = kwargs['remote_source'] + self.remote_destination = kwargs['remote_destination'] self.skip_files = kwargs.get('skip_files', None) self.check_mark_file = kwargs.get('check_mark_file', None) -class ImageStoreInfo(Model): +class ImageStoreInfo(msrest.serialization.Model): """Information about the ImageStore's resource usage. - :param disk_info: disk capacity and available disk space on the node where - the ImageStore primary is placed. + :param disk_info: disk capacity and available disk space on the node where the ImageStore + primary is placed. :type disk_info: ~azure.servicefabric.models.DiskInfo :param used_by_metadata: the ImageStore's file system usage for metadata. :type used_by_metadata: ~azure.servicefabric.models.UsageInfo - :param used_by_staging: The ImageStore's file system usage for staging - files that are being uploaded. + :param used_by_staging: The ImageStore's file system usage for staging files that are being + uploaded. :type used_by_staging: ~azure.servicefabric.models.UsageInfo - :param used_by_copy: the ImageStore's file system usage for copied - application and cluster packages. [Removing application and cluster - packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-deleteimagestorecontent) - will free up this space. + :param used_by_copy: the ImageStore's file system usage for copied application and cluster + packages. `Removing application and cluster packages + `_ will + free up this space. 
:type used_by_copy: ~azure.servicefabric.models.UsageInfo - :param used_by_register: the ImageStore's file system usage for registered - and cluster packages. [Unregistering - application](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) - and [cluster - packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) + :param used_by_register: the ImageStore's file system usage for registered and cluster + packages. `Unregistering application + `_ + and `cluster packages + `_ will free up this space. :type used_by_register: ~azure.servicefabric.models.UsageInfo """ @@ -11290,7 +12165,10 @@ class ImageStoreInfo(Model): 'used_by_register': {'key': 'UsedByRegister', 'type': 'UsageInfo'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ImageStoreInfo, self).__init__(**kwargs) self.disk_info = kwargs.get('disk_info', None) self.used_by_metadata = kwargs.get('used_by_metadata', None) @@ -11299,17 +12177,17 @@ def __init__(self, **kwargs): self.used_by_register = kwargs.get('used_by_register', None) -class SecretResourcePropertiesBase(Model): - """This type describes the properties of a secret resource, including its - kind. +class SecretResourcePropertiesBase(msrest.serialization.Model): + """This type describes the properties of a secret resource, including its kind. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecretResourceProperties + sub-classes are: SecretResourceProperties. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values + include: "inlinedValue", "keyVaultVersionedReference". 
+ :type kind: str or ~azure.servicefabric.models.SecretKind """ _validation = { @@ -11324,35 +12202,36 @@ class SecretResourcePropertiesBase(Model): 'kind': {'SecretResourceProperties': 'SecretResourceProperties'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecretResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class SecretResourceProperties(SecretResourcePropertiesBase): """Describes the properties of a secret resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: InlinedValueSecretResourceProperties + sub-classes are: InlinedValueSecretResourceProperties. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values + include: "inlinedValue", "keyVaultVersionedReference". + :type kind: str or ~azure.servicefabric.models.SecretKind :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the secret. + :ivar status_details: Gives additional information about the current status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. 
- The value of this property is opaque to Service Fabric. Once set, the - value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. The value of this + property is opaque to Service Fabric. Once set, the value of this property cannot be changed. :type content_type: str """ @@ -11374,40 +12253,37 @@ class SecretResourceProperties(SecretResourcePropertiesBase): 'kind': {'inlinedValue': 'InlinedValueSecretResourceProperties'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecretResourceProperties, self).__init__(**kwargs) + self.kind = 'SecretResourceProperties' # type: str self.description = kwargs.get('description', None) self.status = None self.status_details = None self.content_type = kwargs.get('content_type', None) - self.kind = 'SecretResourceProperties' class InlinedValueSecretResourceProperties(SecretResourceProperties): - """Describes the properties of a secret resource whose value is provided - explicitly as plaintext. The secret resource may have multiple values, each - being uniquely versioned. The secret value of each version is stored - encrypted, and delivered as plaintext into the context of applications - referencing it. + """Describes the properties of a secret resource whose value is provided explicitly as plaintext. The secret resource may have multiple values, each being uniquely versioned. The secret value of each version is stored encrypted, and delivered as plaintext into the context of applications referencing it. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Describes the kind of secret.Constant filled by server. 
Possible values + include: "inlinedValue", "keyVaultVersionedReference". + :type kind: str or ~azure.servicefabric.models.SecretKind :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the secret. + :ivar status_details: Gives additional information about the current status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. - The value of this property is opaque to Service Fabric. Once set, the - value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. The value of this + property is opaque to Service Fabric. Once set, the value of this property cannot be changed. :type content_type: str """ @@ -11425,9 +12301,32 @@ class InlinedValueSecretResourceProperties(SecretResourceProperties): 'content_type': {'key': 'contentType', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(InlinedValueSecretResourceProperties, self).__init__(**kwargs) - self.kind = 'inlinedValue' + self.kind = 'inlinedValue' # type: str + + +class InstanceLifecycleDescription(msrest.serialization.Model): + """Describes how the instance will behave. + + :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original + location after upgrade. 
+ :type restore_replica_location_after_upgrade: bool + """ + + _attribute_map = { + 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(InstanceLifecycleDescription, self).__init__(**kwargs) + self.restore_replica_location_after_upgrade = kwargs.get('restore_replica_location_after_upgrade', None) class Int64PropertyValue(PropertyValue): @@ -11435,8 +12334,10 @@ class Int64PropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. :type data: str """ @@ -11451,30 +12352,32 @@ class Int64PropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(Int64PropertyValue, self).__init__(**kwargs) - self.data = kwargs.get('data', None) - self.kind = 'Int64' + self.kind = 'Int64' # type: str + self.data = kwargs['data'] -class PartitionInformation(Model): - """Information about the partition identity, partitioning scheme and keys - supported by it. +class PartitionInformation(msrest.serialization.Model): + """Information about the partition identity, partitioning scheme and keys supported by it. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, - SingletonPartitionInformation + sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, SingletonPartitionInformation. 
All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". + :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str """ _validation = { @@ -11482,38 +12385,40 @@ class PartitionInformation(Model): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, } _subtype_map = { 'service_partition_kind': {'Int64Range': 'Int64RangePartitionInformation', 'Named': 'NamedPartitionInformation', 'Singleton': 'SingletonPartitionInformation'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionInformation, self).__init__(**kwargs) + self.service_partition_kind = None # type: Optional[str] self.id = kwargs.get('id', None) - self.service_partition_kind = None class Int64RangePartitionInformation(PartitionInformation): - """Describes the partition information for the integer range that is based on - partition schemes. 
+ """Describes the partition information for the integer range that is based on partition schemes. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". + :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str :param low_key: Specifies the minimum key value handled by this partition. :type low_key: str - :param high_key: Specifies the maximum key value handled by this - partition. + :param high_key: Specifies the maximum key value handled by this partition. 
:type high_key: str """ @@ -11522,28 +12427,30 @@ class Int64RangePartitionInformation(PartitionInformation): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, 'low_key': {'key': 'LowKey', 'type': 'str'}, 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(Int64RangePartitionInformation, self).__init__(**kwargs) + self.service_partition_kind = 'Int64Range' # type: str self.low_key = kwargs.get('low_key', None) self.high_key = kwargs.get('high_key', None) - self.service_partition_kind = 'Int64Range' -class InvokeDataLossResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class InvokeDataLossResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the - partition that the user-induced operation acted upon. + :param selected_partition: This class returns information about the partition that the + user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -11552,21 +12459,23 @@ class InvokeDataLossResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(InvokeDataLossResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.selected_partition = kwargs.get('selected_partition', None) -class InvokeQuorumLossResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class InvokeQuorumLossResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the - partition that the user-induced operation acted upon. + :param selected_partition: This class returns information about the partition that the + user-induced operation acted upon. :type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -11575,22 +12484,26 @@ class InvokeQuorumLossResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(InvokeQuorumLossResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.selected_partition = kwargs.get('selected_partition', None) -class ReplicaStatusBase(Model): +class ReplicaStatusBase(msrest.serialization.Model): """Information about the replica. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: KeyValueStoreReplicaStatus + sub-classes are: KeyValueStoreReplicaStatus. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Invalid", "KeyValueStore". + :type kind: str or ~azure.servicefabric.models.ReplicaKind """ _validation = { @@ -11605,9 +12518,12 @@ class ReplicaStatusBase(Model): 'kind': {'KeyValueStore': 'KeyValueStoreReplicaStatus'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaStatusBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class KeyValueStoreReplicaStatus(ReplicaStatusBase): @@ -11615,24 +12531,22 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param database_row_count_estimate: Value indicating the estimated number - of rows in the underlying database. + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Invalid", "KeyValueStore". + :type kind: str or ~azure.servicefabric.models.ReplicaKind + :param database_row_count_estimate: Value indicating the estimated number of rows in the + underlying database. :type database_row_count_estimate: str - :param database_logical_size_estimate: Value indicating the estimated size - of the underlying database. + :param database_logical_size_estimate: Value indicating the estimated size of the underlying + database. :type database_logical_size_estimate: str - :param copy_notification_current_key_filter: Value indicating the latest - key-prefix filter applied to enumeration during the callback. Null if - there is no pending callback. 
+ :param copy_notification_current_key_filter: Value indicating the latest key-prefix filter + applied to enumeration during the callback. Null if there is no pending callback. :type copy_notification_current_key_filter: str - :param copy_notification_current_progress: Value indicating the latest - number of keys enumerated during the callback. 0 if there is no pending - callback. + :param copy_notification_current_progress: Value indicating the latest number of keys + enumerated during the callback. 0 if there is no pending callback. :type copy_notification_current_progress: str - :param status_details: Value indicating the current status details of the - replica. + :param status_details: Value indicating the current status details of the replica. :type status_details: str """ @@ -11649,95 +12563,199 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(KeyValueStoreReplicaStatus, self).__init__(**kwargs) + self.kind = 'KeyValueStore' # type: str self.database_row_count_estimate = kwargs.get('database_row_count_estimate', None) self.database_logical_size_estimate = kwargs.get('database_logical_size_estimate', None) self.copy_notification_current_key_filter = kwargs.get('copy_notification_current_key_filter', None) self.copy_notification_current_progress = kwargs.get('copy_notification_current_progress', None) self.status_details = kwargs.get('status_details', None) - self.kind = 'KeyValueStore' -class LoadMetricInformation(Model): - """Represents data structure that contains load information for a certain - metric in a cluster. +class LoadedPartitionInformationQueryDescription(msrest.serialization.Model): + """Represents data structure that contains query information. - :param name: Name of the metric for which this load information is - provided. 
+ :param metric_name: Name of the metric for which this information is provided. + :type metric_name: str + :param service_name: Name of the service this partition belongs to. + :type service_name: str + :param ordering: Ordering of partitions' load. Possible values include: "Desc", "Asc". + :type ordering: str or ~azure.servicefabric.models.Ordering + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. 
+ :type continuation_token: str + """ + + _attribute_map = { + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'ordering': {'key': 'Ordering', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'long'}, + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LoadedPartitionInformationQueryDescription, self).__init__(**kwargs) + self.metric_name = kwargs.get('metric_name', None) + self.service_name = kwargs.get('service_name', None) + self.ordering = kwargs.get('ordering', None) + self.max_results = kwargs.get('max_results', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class LoadedPartitionInformationResult(msrest.serialization.Model): + """Represents partition information. + + All required parameters must be populated in order to send to Azure. + + :param service_name: Required. Name of the service this partition belongs to. + :type service_name: str + :param partition_id: Required. Id of the partition. + :type partition_id: str + :param metric_name: Required. Name of the metric for which this information is provided. + :type metric_name: str + :param load: Required. Load for metric. 
+ :type load: long + """ + + _validation = { + 'service_name': {'required': True}, + 'partition_id': {'required': True}, + 'metric_name': {'required': True}, + 'load': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'load': {'key': 'Load', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(LoadedPartitionInformationResult, self).__init__(**kwargs) + self.service_name = kwargs['service_name'] + self.partition_id = kwargs['partition_id'] + self.metric_name = kwargs['metric_name'] + self.load = kwargs['load'] + + +class LoadedPartitionInformationResultList(msrest.serialization.Model): + """Represents data structure that contains top/least loaded partitions for a certain metric. + + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. + :type continuation_token: str + :param items: List of loaded partition information results. 
+ :type items: list[~azure.servicefabric.models.LoadedPartitionInformationResult] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[LoadedPartitionInformationResult]'}, + } + + def __init__( + self, + **kwargs + ): + super(LoadedPartitionInformationResultList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) + + +class LoadMetricInformation(msrest.serialization.Model): + """Represents data structure that contains load information for a certain metric in a cluster. + + :param name: Name of the metric for which this load information is provided. :type name: str - :param is_balanced_before: Value that indicates whether the metrics is - balanced or not before resource balancer run + :param is_balanced_before: Value that indicates whether the metrics is balanced or not before + resource balancer run. :type is_balanced_before: bool - :param is_balanced_after: Value that indicates whether the metrics is - balanced or not after resource balancer run. + :param is_balanced_after: Value that indicates whether the metrics is balanced or not after + resource balancer run. :type is_balanced_after: bool - :param deviation_before: The standard average deviation of the metrics - before resource balancer run. + :param deviation_before: The standard average deviation of the metrics before resource balancer + run. :type deviation_before: str - :param deviation_after: The standard average deviation of the metrics - after resource balancer run. + :param deviation_after: The standard average deviation of the metrics after resource balancer + run. :type deviation_after: str :param balancing_threshold: The balancing threshold for a certain metric. 
:type balancing_threshold: str - :param action: The current action being taken with regard to this metric + :param action: The current action being taken with regard to this metric. :type action: str - :param activity_threshold: The Activity Threshold specified for this - metric in the system Cluster Manifest. + :param activity_threshold: The Activity Threshold specified for this metric in the system + Cluster Manifest. :type activity_threshold: str - :param cluster_capacity: The total cluster capacity for a given metric + :param cluster_capacity: The total cluster capacity for a given metric. :type cluster_capacity: str - :param cluster_load: The total cluster load. In future releases of Service - Fabric this parameter will be deprecated in favor of CurrentClusterLoad. + :param cluster_load: The total cluster load. In future releases of Service Fabric this + parameter will be deprecated in favor of CurrentClusterLoad. :type cluster_load: str :param current_cluster_load: The total cluster load. :type current_cluster_load: str - :param cluster_remaining_capacity: The remaining capacity for the metric - in the cluster. In future releases of Service Fabric this parameter will - be deprecated in favor of ClusterCapacityRemaining. + :param cluster_remaining_capacity: The remaining capacity for the metric in the cluster. In + future releases of Service Fabric this parameter will be deprecated in favor of + ClusterCapacityRemaining. :type cluster_remaining_capacity: str - :param cluster_capacity_remaining: The remaining capacity for the metric - in the cluster. + :param cluster_capacity_remaining: The remaining capacity for the metric in the cluster. :type cluster_capacity_remaining: str - :param is_cluster_capacity_violation: Indicates that the metric is - currently over capacity in the cluster. + :param is_cluster_capacity_violation: Indicates that the metric is currently over capacity in + the cluster. 
:type is_cluster_capacity_violation: bool - :param node_buffer_percentage: The reserved percentage of total node - capacity for this metric. + :param node_buffer_percentage: The reserved percentage of total node capacity for this metric. :type node_buffer_percentage: str - :param cluster_buffered_capacity: Remaining capacity in the cluster - excluding the reserved space. In future releases of Service Fabric this - parameter will be deprecated in favor of BufferedClusterCapacityRemaining. + :param cluster_buffered_capacity: Remaining capacity in the cluster excluding the reserved + space. In future releases of Service Fabric this parameter will be deprecated in favor of + BufferedClusterCapacityRemaining. :type cluster_buffered_capacity: str - :param buffered_cluster_capacity_remaining: Remaining capacity in the - cluster excluding the reserved space. + :param buffered_cluster_capacity_remaining: Remaining capacity in the cluster excluding the + reserved space. :type buffered_cluster_capacity_remaining: str - :param cluster_remaining_buffered_capacity: The remaining percentage of - cluster total capacity for this metric. + :param cluster_remaining_buffered_capacity: The remaining percentage of cluster total capacity + for this metric. :type cluster_remaining_buffered_capacity: str - :param min_node_load_value: The minimum load on any node for this metric. - In future releases of Service Fabric this parameter will be deprecated in - favor of MinimumNodeLoad. + :param min_node_load_value: The minimum load on any node for this metric. In future releases of + Service Fabric this parameter will be deprecated in favor of MinimumNodeLoad. :type min_node_load_value: str :param minimum_node_load: The minimum load on any node for this metric. :type minimum_node_load: str - :param min_node_load_node_id: The node id of the node with the minimum - load for this metric. + :param min_node_load_node_id: The node id of the node with the minimum load for this metric. 
:type min_node_load_node_id: ~azure.servicefabric.models.NodeId - :param max_node_load_value: The maximum load on any node for this metric. - In future releases of Service Fabric this parameter will be deprecated in - favor of MaximumNodeLoad. + :param max_node_load_value: The maximum load on any node for this metric. In future releases of + Service Fabric this parameter will be deprecated in favor of MaximumNodeLoad. :type max_node_load_value: str :param maximum_node_load: The maximum load on any node for this metric. :type maximum_node_load: str - :param max_node_load_node_id: The node id of the node with the maximum - load for this metric. + :param max_node_load_node_id: The node id of the node with the maximum load for this metric. :type max_node_load_node_id: ~azure.servicefabric.models.NodeId - :param planned_load_removal: This value represents the load of the - replicas that are planned to be removed in the future within the cluster. - This kind of load is reported for replicas that are currently being moving - to other nodes and for replicas that are currently being dropped but still - use the load on the source node. + :param planned_load_removal: This value represents the load of the replicas that are planned to + be removed in the future within the cluster. + This kind of load is reported for replicas that are currently being moving to other nodes and + for replicas that are currently being dropped but still use the load on the source node. 
:type planned_load_removal: str """ @@ -11769,7 +12787,10 @@ class LoadMetricInformation(Model): 'planned_load_removal': {'key': 'PlannedLoadRemoval', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(LoadMetricInformation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.is_balanced_before = kwargs.get('is_balanced_before', None) @@ -11798,16 +12819,15 @@ def __init__(self, **kwargs): self.planned_load_removal = kwargs.get('planned_load_removal', None) -class LoadMetricReport(Model): - """Represents the load metric report which contains the time metric was - reported, its name and value. +class LoadMetricReport(msrest.serialization.Model): + """Represents the load metric report which contains the time metric was reported, its name and value. :param last_reported_utc: Gets the UTC time when the load was reported. - :type last_reported_utc: datetime + :type last_reported_utc: ~datetime.datetime :param name: The name of the load metric. :type name: str - :param value: The value of the load metric. In future releases of Service - Fabric this parameter will be deprecated in favor of CurrentValue. + :param value: The value of the load metric. In future releases of Service Fabric this parameter + will be deprecated in favor of CurrentValue. :type value: str :param current_value: The value of the load metric. :type current_value: str @@ -11820,7 +12840,10 @@ class LoadMetricReport(Model): 'current_value': {'key': 'CurrentValue', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(LoadMetricReport, self).__init__(**kwargs) self.last_reported_utc = kwargs.get('last_reported_utc', None) self.name = kwargs.get('name', None) @@ -11828,18 +12851,18 @@ def __init__(self, **kwargs): self.current_value = kwargs.get('current_value', None) -class LoadMetricReportInfo(Model): +class LoadMetricReportInfo(msrest.serialization.Model): """Information about load reported by replica. 
:param name: The name of the metric. :type name: str - :param value: The value of the load for the metric. In future releases of - Service Fabric this parameter will be deprecated in favor of CurrentValue. + :param value: The value of the load for the metric. In future releases of Service Fabric this + parameter will be deprecated in favor of CurrentValue. :type value: int :param current_value: The double value of the load for the metric. :type current_value: str :param last_reported_utc: The UTC time when the load is reported. - :type last_reported_utc: datetime + :type last_reported_utc: ~datetime.datetime """ _attribute_map = { @@ -11849,7 +12872,10 @@ class LoadMetricReportInfo(Model): 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(LoadMetricReportInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) @@ -11857,17 +12883,17 @@ def __init__(self, **kwargs): self.last_reported_utc = kwargs.get('last_reported_utc', None) -class NetworkResourcePropertiesBase(Model): - """This type describes the properties of a network resource, including its - kind. +class NetworkResourcePropertiesBase(msrest.serialization.Model): + """This type describes the properties of a network resource, including its kind. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NetworkResourceProperties + sub-classes are: NetworkResourceProperties. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of a Service Fabric container network.Constant filled by + server. Possible values include: "Local". 
+ :type kind: str or ~azure.servicefabric.models.NetworkKind """ _validation = { @@ -11882,31 +12908,33 @@ class NetworkResourcePropertiesBase(Model): 'kind': {'NetworkResourceProperties': 'NetworkResourceProperties'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NetworkResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class NetworkResourceProperties(NetworkResourcePropertiesBase): """Describes properties of a network resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LocalNetworkResourceProperties + sub-classes are: LocalNetworkResourceProperties. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of a Service Fabric container network.Constant filled by + server. Possible values include: "Local". + :type kind: str or ~azure.servicefabric.models.NetworkKind :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the network. + :ivar status_details: Gives additional information about the current status of the network. 
:vartype status_details: str """ @@ -11927,35 +12955,35 @@ class NetworkResourceProperties(NetworkResourcePropertiesBase): 'kind': {'Local': 'LocalNetworkResourceProperties'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NetworkResourceProperties, self).__init__(**kwargs) + self.kind = 'NetworkResourceProperties' # type: str self.description = kwargs.get('description', None) self.status = None self.status_details = None - self.kind = 'NetworkResourceProperties' class LocalNetworkResourceProperties(NetworkResourceProperties): - """Information about a Service Fabric container network local to a single - Service Fabric cluster. + """Information about a Service Fabric container network local to a single Service Fabric cluster. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of a Service Fabric container network.Constant filled by + server. Possible values include: "Local". + :type kind: str or ~azure.servicefabric.models.NetworkKind :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the network. + :ivar status_details: Gives additional information about the current status of the network. 
:vartype status_details: str - :param network_address_prefix: Address space for the local container - network. + :param network_address_prefix: Address space for the local container network. :type network_address_prefix: str """ @@ -11973,13 +13001,16 @@ class LocalNetworkResourceProperties(NetworkResourceProperties): 'network_address_prefix': {'key': 'networkAddressPrefix', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(LocalNetworkResourceProperties, self).__init__(**kwargs) + self.kind = 'Local' # type: str self.network_address_prefix = kwargs.get('network_address_prefix', None) - self.kind = 'Local' -class ManagedApplicationIdentity(Model): +class ManagedApplicationIdentity(msrest.serialization.Model): """Describes a managed application identity. All required parameters must be populated in order to send to Azure. @@ -11999,20 +13030,22 @@ class ManagedApplicationIdentity(Model): 'principal_id': {'key': 'PrincipalId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ManagedApplicationIdentity, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.principal_id = kwargs.get('principal_id', None) -class ManagedApplicationIdentityDescription(Model): +class ManagedApplicationIdentityDescription(msrest.serialization.Model): """Managed application identity description. :param token_service_endpoint: Token service endpoint. :type token_service_endpoint: str :param managed_identities: A list of managed application identity objects. 
- :type managed_identities: - list[~azure.servicefabric.models.ManagedApplicationIdentity] + :type managed_identities: list[~azure.servicefabric.models.ManagedApplicationIdentity] """ _attribute_map = { @@ -12020,13 +13053,63 @@ class ManagedApplicationIdentityDescription(Model): 'managed_identities': {'key': 'ManagedIdentities', 'type': '[ManagedApplicationIdentity]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ManagedApplicationIdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = kwargs.get('token_service_endpoint', None) self.managed_identities = kwargs.get('managed_identities', None) -class MetricLoadDescription(Model): +class ManagedIdentityAzureBlobBackupStorageDescription(BackupStorageDescription): + """Describes the parameters for Azure blob store (connected using managed identity) used for storing and enumerating backups. + + All required parameters must be populated in order to send to Azure. + + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind + :param friendly_name: Friendly name for this backup storage. + :type friendly_name: str + :param managed_identity_type: Required. The type of managed identity to be used to connect to + Azure Blob Store via Managed Identity. Possible values include: "Invalid", "VMSS", "Cluster". + :type managed_identity_type: str or ~azure.servicefabric.models.ManagedIdentityType + :param blob_service_uri: Required. The Blob Service Uri to connect to the Azure blob store. + :type blob_service_uri: str + :param container_name: Required. The name of the container in the blob store to store and + enumerate backups from. 
+ :type container_name: str + """ + + _validation = { + 'storage_kind': {'required': True}, + 'managed_identity_type': {'required': True}, + 'blob_service_uri': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'managed_identity_type': {'key': 'ManagedIdentityType', 'type': 'str'}, + 'blob_service_uri': {'key': 'BlobServiceUri', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ManagedIdentityAzureBlobBackupStorageDescription, self).__init__(**kwargs) + self.storage_kind = 'ManagedIdentityAzureBlobStore' # type: str + self.managed_identity_type = kwargs['managed_identity_type'] + self.blob_service_uri = kwargs['blob_service_uri'] + self.container_name = kwargs['container_name'] + + +class MetricLoadDescription(msrest.serialization.Model): """Specifies metric load information. :param metric_name: The name of the reported metric. @@ -12043,52 +13126,50 @@ class MetricLoadDescription(Model): 'predicted_load': {'key': 'PredictedLoad', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(MetricLoadDescription, self).__init__(**kwargs) self.metric_name = kwargs.get('metric_name', None) self.current_load = kwargs.get('current_load', None) self.predicted_load = kwargs.get('predicted_load', None) -class MonitoringPolicyDescription(Model): +class MonitoringPolicyDescription(msrest.serialization.Model): """Describes the parameters for monitoring an upgrade in Monitored mode. - :param failure_action: The compensating action to perform when a Monitored - upgrade encounters monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that - the upgrade will start rolling back automatically. 
- Manual indicates that the upgrade will switch to UnmonitoredManual upgrade - mode. Possible values include: 'Invalid', 'Rollback', 'Manual' + :param failure_action: The compensating action to perform when a Monitored upgrade encounters + monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will + start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible + values include: "Invalid", "Rollback", "Manual". :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to - wait after completing an upgrade domain before applying health policies. - It is first interpreted as a string representing an ISO 8601 duration. If - that fails, then it is interpreted as a number representing the total - number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing + an upgrade domain before applying health policies. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time - that the application or cluster must remain healthy before the upgrade - proceeds to the next upgrade domain. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time that the application or + cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first + interpreted as a string representing an ISO 8601 duration. 
If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to - retry health evaluation when the application or cluster is unhealthy - before FailureAction is executed. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health + evaluation when the application or cluster is unhealthy before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall - upgrade has to complete before FailureAction is executed. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, - then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete + before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 + duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each - upgrade domain has to complete before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that - fails, then it is interpreted as a number representing the total number of - milliseconds. 
+ :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to + complete before FailureAction is executed. It is first interpreted as a string representing an + ISO 8601 duration. If that fails, then it is interpreted as a number representing the total + number of milliseconds. :type upgrade_domain_timeout_in_milliseconds: str """ @@ -12101,23 +13182,25 @@ class MonitoringPolicyDescription(Model): 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(MonitoringPolicyDescription, self).__init__(**kwargs) self.failure_action = kwargs.get('failure_action', None) - self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', None) - self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', None) - self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', None) - self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', None) - self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', None) + self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', "0") + self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', "PT0H2M0S") + self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', "PT0H10M0S") + self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") + self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") -class NameDescription(Model): +class NameDescription(msrest.serialization.Model): """Describes a Service 
Fabric name. All required parameters must be populated in order to send to Azure. - :param name: Required. The Service Fabric name, including the 'fabric:' - URI scheme. + :param name: Required. The Service Fabric name, including the 'fabric:' URI scheme. :type name: str """ @@ -12129,25 +13212,28 @@ class NameDescription(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NameDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] class NamedPartitionInformation(PartitionInformation): - """Describes the partition information for the name as a string that is based - on partition schemes. + """Describes the partition information for the name as a string that is based on partition schemes. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". + :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str :param name: Name of the partition. 
:type name: str """ @@ -12157,29 +13243,31 @@ class NamedPartitionInformation(PartitionInformation): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NamedPartitionInformation, self).__init__(**kwargs) + self.service_partition_kind = 'Named' # type: str self.name = kwargs.get('name', None) - self.service_partition_kind = 'Named' -class PartitionSchemeDescription(Model): +class PartitionSchemeDescription(msrest.serialization.Model): """Describes how the service is partitioned. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NamedPartitionSchemeDescription, - SingletonPartitionSchemeDescription, - UniformInt64RangePartitionSchemeDescription + sub-classes are: NamedPartitionSchemeDescription, SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". 
+ :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme """ _validation = { @@ -12194,9 +13282,12 @@ class PartitionSchemeDescription(Model): 'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = None + self.partition_scheme = None # type: Optional[str] class NamedPartitionSchemeDescription(PartitionSchemeDescription): @@ -12204,12 +13295,13 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". + :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme :param count: Required. The number of partitions. :type count: int - :param names: Required. Array of size specified by the ‘Count’ parameter, - for the names of the partitions. + :param names: Required. Array of size specified by the ‘Count’ parameter, for the names of the + partitions. 
:type names: list[str] """ @@ -12225,20 +13317,22 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): 'names': {'key': 'Names', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NamedPartitionSchemeDescription, self).__init__(**kwargs) - self.count = kwargs.get('count', None) - self.names = kwargs.get('names', None) - self.partition_scheme = 'Named' + self.partition_scheme = 'Named' # type: str + self.count = kwargs['count'] + self.names = kwargs['names'] -class NetworkRef(Model): +class NetworkRef(msrest.serialization.Model): """Describes a network reference in a service. - :param name: Name of the network + :param name: Name of the network. :type name: str - :param endpoint_refs: A list of endpoints that are exposed on this - network. + :param endpoint_refs: A list of endpoints that are exposed on this network. :type endpoint_refs: list[~azure.servicefabric.models.EndpointRef] """ @@ -12247,13 +13341,16 @@ class NetworkRef(Model): 'endpoint_refs': {'key': 'endpointRefs', 'type': '[EndpointRef]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NetworkRef, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.endpoint_refs = kwargs.get('endpoint_refs', None) -class NetworkResourceDescription(Model): +class NetworkResourceDescription(msrest.serialization.Model): """This type describes a network resource. All required parameters must be populated in order to send to Azure. 
@@ -12274,10 +13371,13 @@ class NetworkResourceDescription(Model): 'properties': {'key': 'properties', 'type': 'NetworkResourceProperties'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NetworkResourceDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.properties = kwargs.get('properties', None) + self.name = kwargs['name'] + self.properties = kwargs['properties'] class NodeAbortedEvent(NodeEvent): @@ -12285,18 +13385,38 @@ class NodeAbortedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", 
+ "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -12318,9 +13438,9 @@ class NodeAbortedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -12333,11 +13453,11 @@ class NodeAbortedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -12349,17 +13469,20 @@ class NodeAbortedEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeAbortedEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.node_id = kwargs.get('node_id', None) - self.upgrade_domain = kwargs.get('upgrade_domain', None) - self.fault_domain = kwargs.get('fault_domain', None) - self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) - self.hostname = kwargs.get('hostname', None) - self.is_seed_node = kwargs.get('is_seed_node', None) - self.node_version = kwargs.get('node_version', None) - self.kind = 'NodeAborted' + self.kind = 'NodeAborted' # type: str + self.node_instance = kwargs['node_instance'] + self.node_id = kwargs['node_id'] + self.upgrade_domain = kwargs['upgrade_domain'] + self.fault_domain = kwargs['fault_domain'] + self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] + self.hostname = kwargs['hostname'] + self.is_seed_node = kwargs['is_seed_node'] + self.node_version = kwargs['node_version'] class 
NodeAddedToClusterEvent(NodeEvent): @@ -12367,18 +13490,38 @@ class NodeAddedToClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + 
"ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -12396,9 +13539,9 @@ class NodeAddedToClusterEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -12409,11 +13552,11 @@ class NodeAddedToClusterEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -12423,15 +13566,18 @@ class NodeAddedToClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): 
super(NodeAddedToClusterEvent, self).__init__(**kwargs) - self.node_id = kwargs.get('node_id', None) - self.node_instance = kwargs.get('node_instance', None) - self.node_type = kwargs.get('node_type', None) - self.fabric_version = kwargs.get('fabric_version', None) - self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) - self.node_capacities = kwargs.get('node_capacities', None) - self.kind = 'NodeAddedToCluster' + self.kind = 'NodeAddedToCluster' # type: str + self.node_id = kwargs['node_id'] + self.node_instance = kwargs['node_instance'] + self.node_type = kwargs['node_type'] + self.fabric_version = kwargs['fabric_version'] + self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] + self.node_capacities = kwargs['node_capacities'] class NodeClosedEvent(NodeEvent): @@ -12439,18 +13585,38 @@ class NodeClosedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -12462,9 +13628,9 @@ class NodeClosedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -12472,23 +13638,26 @@ class NodeClosedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'error': {'key': 'Error', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeClosedEvent, self).__init__(**kwargs) - self.node_id = kwargs.get('node_id', None) - self.node_instance = kwargs.get('node_instance', None) - self.error = kwargs.get('error', None) - self.kind = 'NodeClosed' + self.kind = 'NodeClosed' # type: str + self.node_id = kwargs['node_id'] + self.node_instance = kwargs['node_instance'] + self.error = kwargs['error'] class NodeDeactivateCompletedEvent(NodeEvent): @@ -12496,18 +13665,38 @@ class 
NodeDeactivateCompletedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + 
"ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -12517,13 +13706,13 @@ class NodeDeactivateCompletedEvent(NodeEvent): :param batch_ids_with_deactivate_intent: Required. Batch Ids. :type batch_ids_with_deactivate_intent: str :param start_time: Required. Start time. 
- :type start_time: datetime + :type start_time: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'effective_deactivate_intent': {'required': True}, @@ -12532,11 +13721,11 @@ class NodeDeactivateCompletedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'effective_deactivate_intent': {'key': 'EffectiveDeactivateIntent', 'type': 'str'}, @@ -12544,13 +13733,16 @@ class NodeDeactivateCompletedEvent(NodeEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeDeactivateCompletedEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.effective_deactivate_intent = kwargs.get('effective_deactivate_intent', None) - self.batch_ids_with_deactivate_intent = kwargs.get('batch_ids_with_deactivate_intent', None) - self.start_time = kwargs.get('start_time', None) - self.kind = 'NodeDeactivateCompleted' + self.kind = 'NodeDeactivateCompleted' # type: str + self.node_instance = kwargs['node_instance'] + self.effective_deactivate_intent = kwargs['effective_deactivate_intent'] + self.batch_ids_with_deactivate_intent = kwargs['batch_ids_with_deactivate_intent'] + self.start_time = kwargs['start_time'] class NodeDeactivateStartedEvent(NodeEvent): @@ -12558,18 +13750,38 @@ class NodeDeactivateStartedEvent(NodeEvent): All required parameters must be 
populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
+ :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -12581,9 +13793,9 @@ class NodeDeactivateStartedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'batch_id': {'required': True}, @@ -12591,46 +13803,44 @@ class NodeDeactivateStartedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'batch_id': {'key': 'BatchId', 'type': 'str'}, 'deactivate_intent': {'key': 'DeactivateIntent', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeDeactivateStartedEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.batch_id = kwargs.get('batch_id', None) - self.deactivate_intent = 
kwargs.get('deactivate_intent', None) - self.kind = 'NodeDeactivateStarted' - - -class NodeDeactivationInfo(Model): - """Information about the node deactivation. This information is valid for a - node that is undergoing deactivation or has already been deactivated. - - :param node_deactivation_intent: The intent or the reason for deactivating - the node. Following are the possible values for it. Possible values - include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' - :type node_deactivation_intent: str or - ~azure.servicefabric.models.NodeDeactivationIntent - :param node_deactivation_status: The status of node deactivation - operation. Following are the possible values. Possible values include: - 'None', 'SafetyCheckInProgress', 'SafetyCheckComplete', 'Completed' - :type node_deactivation_status: str or - ~azure.servicefabric.models.NodeDeactivationStatus - :param node_deactivation_task: List of tasks representing the deactivation - operation on the node. - :type node_deactivation_task: - list[~azure.servicefabric.models.NodeDeactivationTask] - :param pending_safety_checks: List of pending safety checks - :type pending_safety_checks: - list[~azure.servicefabric.models.SafetyCheckWrapper] + self.kind = 'NodeDeactivateStarted' # type: str + self.node_instance = kwargs['node_instance'] + self.batch_id = kwargs['batch_id'] + self.deactivate_intent = kwargs['deactivate_intent'] + + +class NodeDeactivationInfo(msrest.serialization.Model): + """Information about the node deactivation. This information is valid for a node that is undergoing deactivation or has already been deactivated. + + :param node_deactivation_intent: The intent or the reason for deactivating the node. Following + are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", + "RemoveData", "RemoveNode". 
+ :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_status: The status of node deactivation operation. Following are the + possible values. Possible values include: "None", "SafetyCheckInProgress", + "SafetyCheckComplete", "Completed". + :type node_deactivation_status: str or ~azure.servicefabric.models.NodeDeactivationStatus + :param node_deactivation_task: List of tasks representing the deactivation operation on the + node. + :type node_deactivation_task: list[~azure.servicefabric.models.NodeDeactivationTask] + :param pending_safety_checks: List of pending safety checks. + :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -12640,7 +13850,10 @@ class NodeDeactivationInfo(Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeDeactivationInfo, self).__init__(**kwargs) self.node_deactivation_intent = kwargs.get('node_deactivation_intent', None) self.node_deactivation_status = kwargs.get('node_deactivation_status', None) @@ -12648,18 +13861,16 @@ def __init__(self, **kwargs): self.pending_safety_checks = kwargs.get('pending_safety_checks', None) -class NodeDeactivationTask(Model): +class NodeDeactivationTask(msrest.serialization.Model): """The task representing the deactivation operation on the node. - :param node_deactivation_task_id: Identity of the task related to - deactivation operation on the node. - :type node_deactivation_task_id: - ~azure.servicefabric.models.NodeDeactivationTaskId - :param node_deactivation_intent: The intent or the reason for deactivating - the node. Following are the possible values for it. 
Possible values - include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' - :type node_deactivation_intent: str or - ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_task_id: Identity of the task related to deactivation operation on the + node. + :type node_deactivation_task_id: ~azure.servicefabric.models.NodeDeactivationTaskId + :param node_deactivation_intent: The intent or the reason for deactivating the node. Following + are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", + "RemoveData", "RemoveNode". + :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent """ _attribute_map = { @@ -12667,22 +13878,24 @@ class NodeDeactivationTask(Model): 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeDeactivationTask, self).__init__(**kwargs) self.node_deactivation_task_id = kwargs.get('node_deactivation_task_id', None) self.node_deactivation_intent = kwargs.get('node_deactivation_intent', None) -class NodeDeactivationTaskId(Model): +class NodeDeactivationTaskId(msrest.serialization.Model): """Identity of the task related to deactivation operation on the node. :param id: Value of the task id. :type id: str - :param node_deactivation_task_type: The type of the task that performed - the node deactivation. Following are the possible values. Possible values - include: 'Invalid', 'Infrastructure', 'Repair', 'Client' - :type node_deactivation_task_type: str or - ~azure.servicefabric.models.NodeDeactivationTaskType + :param node_deactivation_task_type: The type of the task that performed the node deactivation. + Following are the possible values. Possible values include: "Invalid", "Infrastructure", + "Repair", "Client". 
+ :type node_deactivation_task_type: str or ~azure.servicefabric.models.NodeDeactivationTaskType """ _attribute_map = { @@ -12690,7 +13903,10 @@ class NodeDeactivationTaskId(Model): 'node_deactivation_task_type': {'key': 'NodeDeactivationTaskType', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeDeactivationTaskId, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.node_deactivation_task_type = kwargs.get('node_deactivation_task_type', None) @@ -12701,75 +13917,95 @@ class NodeDownEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_up_at: Required. Time when Node was last up. 
- :type last_node_up_at: datetime + :type last_node_up_at: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_up_at': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_up_at': {'key': 'LastNodeUpAt', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeDownEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.last_node_up_at = kwargs.get('last_node_up_at', None) - self.kind = 'NodeDown' + self.kind = 'NodeDown' # type: str + self.node_instance = kwargs['node_instance'] + self.last_node_up_at = kwargs['last_node_up_at'] class NodeHealth(EntityHealth): """Information about the health of a Service Fabric node. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. 
+ The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the node whose health information is described by - this object. + :param name: Name of the node whose health information is described by this object. :type name: str """ @@ -12781,37 +14017,41 @@ class NodeHealth(EntityHealth): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) class NodeHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a node, containing information about the - data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. 
- - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a node, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the node. The types of the - unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the node. The types of the unhealthy evaluations can be EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -12819,18 +14059,21 @@ class NodeHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Node' # type: str self.node_name = kwargs.get('node_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Node' class NodeHealthReportExpiredEvent(NodeEvent): @@ -12838,18 +14081,38 @@ class NodeHealthReportExpiredEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. 
+ :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -12866,17 +14129,16 @@ class NodeHealthReportExpiredEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -12890,11 +14152,11 @@ class NodeHealthReportExpiredEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -12907,34 +14169,34 @@ class NodeHealthReportExpiredEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealthReportExpiredEvent, self).__init__(**kwargs) - self.node_instance_id = kwargs.get('node_instance_id', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'NodeHealthReportExpired' + self.kind = 'NodeHealthReportExpired' # type: str + self.node_instance_id = kwargs['node_instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = 
kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class NodeHealthState(EntityHealthState): - """Represents the health state of a node, which contains the node identifier - and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Represents the health state of a node, which contains the node identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param name: The name of a Service Fabric node. :type name: str - :param id: An internal ID used by Service Fabric to uniquely identify a - node. Node Id is deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is + deterministically generated from node name. 
:type id: ~azure.servicefabric.models.NodeId """ @@ -12944,19 +14206,21 @@ class NodeHealthState(EntityHealthState): 'id': {'key': 'Id', 'type': 'NodeId'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealthState, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.id = kwargs.get('id', None) class NodeHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a node, which contains the node name - and its aggregated health state. + """Represents the health state chunk of a node, which contains the node name and its aggregated health state. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str @@ -12967,21 +14231,22 @@ class NodeHealthStateChunk(EntityHealthStateChunk): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealthStateChunk, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) class NodeHealthStateChunkList(EntityHealthStateChunkList): - """The list of node health state chunks in the cluster that respect the input - filters in the chunk query. Returned by get cluster health state chunks - query. + """The list of node health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. 
- :param total_count: Total number of entity health state objects that match - the specified filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match the specified + filters from the cluster health chunk query description. :type total_count: long - :param items: The list of node health state chunks that respect the input - filters in the chunk query. + :param items: The list of node health state chunks that respect the input filters in the chunk + query. :type items: list[~azure.servicefabric.models.NodeHealthStateChunk] """ @@ -12990,51 +14255,48 @@ class NodeHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[NodeHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class NodeHealthStateFilter(Model): - """Defines matching criteria to determine whether a node should be included in - the returned cluster health chunk. - One filter can match zero, one or multiple nodes, depending on its - properties. - Can be specified in the cluster health chunk query description. - - :param node_name_filter: Name of the node that matches the filter. The - filter is applied only to the specified node, if it exists. - If the node doesn't exist, no node is returned in the cluster health chunk - based on this filter. - If the node exists, it is included in the cluster health chunk if the - health state matches the other filter properties. - If not specified, all nodes that match the parent filters (if any) are - taken into consideration and matched against the other filter members, - like health state filter. +class NodeHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a node should be included in the returned cluster health chunk. 
+One filter can match zero, one or multiple nodes, depending on its properties. +Can be specified in the cluster health chunk query description. + + :param node_name_filter: Name of the node that matches the filter. The filter is applied only + to the specified node, if it exists. + If the node doesn't exist, no node is returned in the cluster health chunk based on this + filter. + If the node exists, it is included in the cluster health chunk if the health state matches the + other filter properties. + If not specified, all nodes that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the nodes. - It allows selecting nodes if they match the desired health states. - The possible values are integer value of one of the following health - states. Only nodes that match the filter are returned. All nodes are used - to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the node name is - specified. If the filter has default value and node name is specified, the - matching node is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches nodes with HealthState - value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. 
- - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the nodes. It allows selecting + nodes if they match the desired health states. + The possible values are integer value of one of the following health states. Only nodes that + match the filter are returned. All nodes are used to evaluate the cluster aggregated health + state. + If not specified, default value is None, unless the node name is specified. If the filter has + default value and node name is specified, the matching node is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches nodes with HealthState value of OK (2) and + Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int """ @@ -13043,15 +14305,17 @@ class NodeHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = kwargs.get('node_name_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) -class NodeId(Model): - """An internal ID used by Service Fabric to uniquely identify a node. Node Id - is deterministically generated from node name. 
+class NodeId(msrest.serialization.Model): + """An internal ID used by Service Fabric to uniquely identify a node. Node Id is deterministically generated from node name. :param id: Value of the node Id. This is a 128 bit integer. :type id: str @@ -13061,22 +14325,25 @@ class NodeId(Model): 'id': {'key': 'Id', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeId, self).__init__(**kwargs) self.id = kwargs.get('id', None) -class NodeImpact(Model): +class NodeImpact(msrest.serialization.Model): """Describes the expected impact of a repair to a particular node. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the impacted node. :type node_name: str - :param impact_level: The level of impact expected. Possible values - include: 'Invalid', 'None', 'Restart', 'RemoveData', 'RemoveNode' + :param impact_level: The level of impact expected. Possible values include: "Invalid", "None", + "Restart", "RemoveData", "RemoveNode". :type impact_level: str or ~azure.servicefabric.models.ImpactLevel """ @@ -13089,71 +14356,70 @@ class NodeImpact(Model): 'impact_level': {'key': 'ImpactLevel', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeImpact, self).__init__(**kwargs) - self.node_name = kwargs.get('node_name', None) + self.node_name = kwargs['node_name'] self.impact_level = kwargs.get('impact_level', None) -class NodeInfo(Model): +class NodeInfo(msrest.serialization.Model): """Information about a node in Service Fabric cluster. :param name: The name of a Service Fabric node. :type name: str - :param ip_address_or_fqdn: The IP address or fully qualified domain name - of the node. 
+ :param ip_address_or_fqdn: The IP address or fully qualified domain name of the node. :type ip_address_or_fqdn: str :param type: The type of the node. :type type: str - :param code_version: The version of Service Fabric binaries that the node - is running. + :param code_version: The version of Service Fabric binaries that the node is running. :type code_version: str - :param config_version: The version of Service Fabric cluster manifest that - the node is using. + :param config_version: The version of Service Fabric cluster manifest that the node is using. :type config_version: str - :param node_status: The status of the node. Possible values include: - 'Invalid', 'Up', 'Down', 'Enabling', 'Disabling', 'Disabled', 'Unknown', - 'Removed' + :param node_status: The status of the node. Possible values include: "Invalid", "Up", "Down", + "Enabling", "Disabling", "Disabled", "Unknown", "Removed". :type node_status: str or ~azure.servicefabric.models.NodeStatus - :param node_up_time_in_seconds: Time in seconds since the node has been in - NodeStatus Up. Value zero indicates that the node is not Up. + :param node_up_time_in_seconds: Time in seconds since the node has been in NodeStatus Up. Value + zero indicates that the node is not Up. :type node_up_time_in_seconds: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param is_seed_node: Indicates if the node is a seed node or not. Returns - true if the node is a seed node, otherwise false. A quorum of seed nodes - are required for proper operation of Service Fabric cluster. 
+ :param is_seed_node: Indicates if the node is a seed node or not. Returns true if the node is a + seed node, otherwise false. A quorum of seed nodes are required for proper operation of Service + Fabric cluster. :type is_seed_node: bool :param upgrade_domain: The upgrade domain of the node. :type upgrade_domain: str :param fault_domain: The fault domain of the node. :type fault_domain: str - :param id: An internal ID used by Service Fabric to uniquely identify a - node. Node Id is deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is + deterministically generated from node name. :type id: ~azure.servicefabric.models.NodeId - :param instance_id: The ID representing the node instance. While the ID of - the node is deterministically generated from the node name and remains - same across restarts, the InstanceId changes every time node restarts. + :param instance_id: The ID representing the node instance. While the ID of the node is + deterministically generated from the node name and remains same across restarts, the InstanceId + changes every time node restarts. :type instance_id: str - :param node_deactivation_info: Information about the node deactivation. - This information is valid for a node that is undergoing deactivation or - has already been deactivated. - :type node_deactivation_info: - ~azure.servicefabric.models.NodeDeactivationInfo - :param is_stopped: Indicates if the node is stopped by calling stop node - API or not. Returns true if the node is stopped, otherwise false. + :param node_deactivation_info: Information about the node deactivation. This information is + valid for a node that is undergoing deactivation or has already been deactivated. + :type node_deactivation_info: ~azure.servicefabric.models.NodeDeactivationInfo + :param is_stopped: Indicates if the node is stopped by calling stop node API or not. Returns + true if the node is stopped, otherwise false. 
:type is_stopped: bool - :param node_down_time_in_seconds: Time in seconds since the node has been - in NodeStatus Down. Value zero indicates node is not NodeStatus Down. + :param node_down_time_in_seconds: Time in seconds since the node has been in NodeStatus Down. + Value zero indicates node is not NodeStatus Down. :type node_down_time_in_seconds: str - :param node_up_at: Date time in UTC when the node came up. If the node has - never been up then this value will be zero date time. - :type node_up_at: datetime - :param node_down_at: Date time in UTC when the node went down. If node has - never been down then this value will be zero date time. - :type node_down_at: datetime + :param node_up_at: Date time in UTC when the node came up. If the node has never been up then + this value will be zero date time. + :type node_up_at: ~datetime.datetime + :param node_down_at: Date time in UTC when the node went down. If node has never been down then + this value will be zero date time. + :type node_down_at: ~datetime.datetime + :param node_tags: List that contains tags, which will be applied to the nodes. 
+ :type node_tags: list[str] """ _attribute_map = { @@ -13175,9 +14441,13 @@ class NodeInfo(Model): 'node_down_time_in_seconds': {'key': 'NodeDownTimeInSeconds', 'type': 'str'}, 'node_up_at': {'key': 'NodeUpAt', 'type': 'iso-8601'}, 'node_down_at': {'key': 'NodeDownAt', 'type': 'iso-8601'}, + 'node_tags': {'key': 'NodeTags', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) @@ -13197,19 +14467,17 @@ def __init__(self, **kwargs): self.node_down_time_in_seconds = kwargs.get('node_down_time_in_seconds', None) self.node_up_at = kwargs.get('node_up_at', None) self.node_down_at = kwargs.get('node_down_at', None) + self.node_tags = kwargs.get('node_tags', None) -class NodeLoadInfo(Model): - """Information about load on a Service Fabric node. It holds a summary of all - metrics and their load on a node. +class NodeLoadInfo(msrest.serialization.Model): + """Information about load on a Service Fabric node. It holds a summary of all metrics and their load on a node. - :param node_name: Name of the node for which the load information is - provided by this object. + :param node_name: Name of the node for which the load information is provided by this object. :type node_name: str - :param node_load_metric_information: List that contains metrics and their - load information on this node. - :type node_load_metric_information: - list[~azure.servicefabric.models.NodeLoadMetricInformation] + :param node_load_metric_information: List that contains metrics and their load information on + this node. 
+ :type node_load_metric_information: list[~azure.servicefabric.models.NodeLoadMetricInformation] """ _attribute_map = { @@ -13217,52 +14485,49 @@ class NodeLoadInfo(Model): 'node_load_metric_information': {'key': 'NodeLoadMetricInformation', 'type': '[NodeLoadMetricInformation]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeLoadInfo, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.node_load_metric_information = kwargs.get('node_load_metric_information', None) -class NodeLoadMetricInformation(Model): - """Represents data structure that contains load information for a certain - metric on a node. +class NodeLoadMetricInformation(msrest.serialization.Model): + """Represents data structure that contains load information for a certain metric on a node. - :param name: Name of the metric for which this load information is - provided. + :param name: Name of the metric for which this load information is provided. :type name: str :param node_capacity: Total capacity on the node for this metric. :type node_capacity: str - :param node_load: Current load on the node for this metric. In future - releases of Service Fabric this parameter will be deprecated in favor of - CurrentNodeLoad. + :param node_load: Current load on the node for this metric. In future releases of Service + Fabric this parameter will be deprecated in favor of CurrentNodeLoad. :type node_load: str - :param node_remaining_capacity: The remaining capacity on the node for - this metric. In future releases of Service Fabric this parameter will be - deprecated in favor of NodeCapacityRemaining. + :param node_remaining_capacity: The remaining capacity on the node for this metric. In future + releases of Service Fabric this parameter will be deprecated in favor of NodeCapacityRemaining. :type node_remaining_capacity: str - :param is_capacity_violation: Indicates if there is a capacity violation - for this metric on the node. 
+ :param is_capacity_violation: Indicates if there is a capacity violation for this metric on the + node. :type is_capacity_violation: bool - :param node_buffered_capacity: The value that indicates the reserved - capacity for this metric on the node. + :param node_buffered_capacity: The value that indicates the reserved capacity for this metric + on the node. :type node_buffered_capacity: str - :param node_remaining_buffered_capacity: The remaining reserved capacity - for this metric on the node. In future releases of Service Fabric this - parameter will be deprecated in favor of BufferedNodeCapacityRemaining. + :param node_remaining_buffered_capacity: The remaining reserved capacity for this metric on the + node. In future releases of Service Fabric this parameter will be deprecated in favor of + BufferedNodeCapacityRemaining. :type node_remaining_buffered_capacity: str :param current_node_load: Current load on the node for this metric. :type current_node_load: str - :param node_capacity_remaining: The remaining capacity on the node for the - metric. + :param node_capacity_remaining: The remaining capacity on the node for the metric. :type node_capacity_remaining: str - :param buffered_node_capacity_remaining: The remaining capacity which is - not reserved by NodeBufferPercentage for this metric on the node. + :param buffered_node_capacity_remaining: The remaining capacity which is not reserved by + NodeBufferPercentage for this metric on the node. :type buffered_node_capacity_remaining: str - :param planned_node_load_removal: This value represents the load of the - replicas that are planned to be removed in the future. - This kind of load is reported for replicas that are currently being moving - to other nodes and for replicas that are currently being dropped but still - use the load on the source node. + :param planned_node_load_removal: This value represents the load of the replicas that are + planned to be removed in the future. 
+ This kind of load is reported for replicas that are currently being moving to other nodes and + for replicas that are currently being dropped but still use the load on the source node. :type planned_node_load_removal: str """ @@ -13280,7 +14545,10 @@ class NodeLoadMetricInformation(Model): 'planned_node_load_removal': {'key': 'PlannedNodeLoadRemoval', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeLoadMetricInformation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.node_capacity = kwargs.get('node_capacity', None) @@ -13300,18 +14568,38 @@ class NodeNewHealthReportEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + 
"DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -13328,17 +14616,16 @@ class NodeNewHealthReportEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -13352,11 +14639,11 @@ class NodeNewHealthReportEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -13369,18 +14656,21 @@ class NodeNewHealthReportEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeNewHealthReportEvent, self).__init__(**kwargs) - self.node_instance_id = kwargs.get('node_instance_id', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'NodeNewHealthReport' + self.kind = 'NodeNewHealthReport' # type: str + self.node_instance_id = kwargs['node_instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + 
self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class NodeOpenFailedEvent(NodeEvent): @@ -13388,18 +14678,38 @@ class NodeOpenFailedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + 
"ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -13423,9 +14733,9 @@ class NodeOpenFailedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -13439,11 +14749,11 @@ class NodeOpenFailedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -13456,18 +14766,21 @@ class NodeOpenFailedEvent(NodeEvent): 'error': {'key': 'Error', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeOpenFailedEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.node_id = kwargs.get('node_id', None) - self.upgrade_domain = kwargs.get('upgrade_domain', None) - self.fault_domain = kwargs.get('fault_domain', None) - self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) - self.hostname = kwargs.get('hostname', None) - self.is_seed_node = kwargs.get('is_seed_node', None) - self.node_version = kwargs.get('node_version', None) - self.error = kwargs.get('error', None) - self.kind = 'NodeOpenFailed' + self.kind = 'NodeOpenFailed' # type: str + self.node_instance = kwargs['node_instance'] + self.node_id = kwargs['node_id'] + self.upgrade_domain = kwargs['upgrade_domain'] + self.fault_domain = kwargs['fault_domain'] + self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] + self.hostname = kwargs['hostname'] + self.is_seed_node = kwargs['is_seed_node'] + 
self.node_version = kwargs['node_version'] + self.error = kwargs['error'] class NodeOpenSucceededEvent(NodeEvent): @@ -13475,18 +14788,38 @@ class NodeOpenSucceededEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + 
"ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -13508,9 +14841,9 @@ class NodeOpenSucceededEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -13523,11 +14856,11 @@ class NodeOpenSucceededEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -13539,17 +14872,20 @@ class NodeOpenSucceededEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def 
__init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeOpenSucceededEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.node_id = kwargs.get('node_id', None) - self.upgrade_domain = kwargs.get('upgrade_domain', None) - self.fault_domain = kwargs.get('fault_domain', None) - self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) - self.hostname = kwargs.get('hostname', None) - self.is_seed_node = kwargs.get('is_seed_node', None) - self.node_version = kwargs.get('node_version', None) - self.kind = 'NodeOpenSucceeded' + self.kind = 'NodeOpenSucceeded' # type: str + self.node_instance = kwargs['node_instance'] + self.node_id = kwargs['node_id'] + self.upgrade_domain = kwargs['upgrade_domain'] + self.fault_domain = kwargs['fault_domain'] + self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] + self.hostname = kwargs['hostname'] + self.is_seed_node = kwargs['is_seed_node'] + self.node_version = kwargs['node_version'] class NodeRemovedFromClusterEvent(NodeEvent): @@ -13557,18 +14893,38 @@ class NodeRemovedFromClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -13586,9 +14942,9 @@ class NodeRemovedFromClusterEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -13599,11 +14955,11 @@ class NodeRemovedFromClusterEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -13613,29 +14969,33 @@ class NodeRemovedFromClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeRemovedFromClusterEvent, self).__init__(**kwargs) - self.node_id = kwargs.get('node_id', None) - self.node_instance = kwargs.get('node_instance', None) - self.node_type = kwargs.get('node_type', None) - self.fabric_version = kwargs.get('fabric_version', None) - self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) - self.node_capacities 
= kwargs.get('node_capacities', None) - self.kind = 'NodeRemovedFromCluster' + self.kind = 'NodeRemovedFromCluster' # type: str + self.node_id = kwargs['node_id'] + self.node_instance = kwargs['node_instance'] + self.node_type = kwargs['node_type'] + self.fabric_version = kwargs['fabric_version'] + self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] + self.node_capacities = kwargs['node_capacities'] -class RepairImpactDescriptionBase(Model): +class RepairImpactDescriptionBase(msrest.serialization.Model): """Describes the expected impact of executing a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairImpactDescription + sub-classes are: NodeRepairImpactDescription. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of repair impact represented by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". + :type kind: str or ~azure.servicefabric.models.RepairImpactKind """ _validation = { @@ -13650,22 +15010,26 @@ class RepairImpactDescriptionBase(Model): 'kind': {'Node': 'NodeRepairImpactDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairImpactDescriptionBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class NodeRepairImpactDescription(RepairImpactDescriptionBase): """Describes the expected impact of a repair on a set of nodes. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param node_impact_list: The list of nodes impacted by a repair action and - their respective expected impact. + :param kind: Required. The kind of repair impact represented by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". + :type kind: str or ~azure.servicefabric.models.RepairImpactKind + :param node_impact_list: The list of nodes impacted by a repair action and their respective + expected impact. :type node_impact_list: list[~azure.servicefabric.models.NodeImpact] """ @@ -13678,24 +15042,28 @@ class NodeRepairImpactDescription(RepairImpactDescriptionBase): 'node_impact_list': {'key': 'NodeImpactList', 'type': '[NodeImpact]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeRepairImpactDescription, self).__init__(**kwargs) + self.kind = 'Node' # type: str self.node_impact_list = kwargs.get('node_impact_list', None) - self.kind = 'Node' -class RepairTargetDescriptionBase(Model): +class RepairTargetDescriptionBase(msrest.serialization.Model): """Describes the entities targeted by a repair action. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairTargetDescription + sub-classes are: NodeRepairTargetDescription. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of repair target described by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". 
+ :type kind: str or ~azure.servicefabric.models.RepairTargetKind """ _validation = { @@ -13710,20 +15078,24 @@ class RepairTargetDescriptionBase(Model): 'kind': {'Node': 'NodeRepairTargetDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTargetDescriptionBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class NodeRepairTargetDescription(RepairTargetDescriptionBase): """Describes the list of nodes targeted by a repair action. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of repair target described by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". + :type kind: str or ~azure.servicefabric.models.RepairTargetKind :param node_names: The list of nodes targeted by a repair action. :type node_names: list[str] """ @@ -13737,15 +15109,17 @@ class NodeRepairTargetDescription(RepairTargetDescriptionBase): 'node_names': {'key': 'NodeNames', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeRepairTargetDescription, self).__init__(**kwargs) + self.kind = 'Node' # type: str self.node_names = kwargs.get('node_names', None) - self.kind = 'Node' -class NodeResult(Model): - """Contains information about a node that was targeted by a user-induced - operation. +class NodeResult(msrest.serialization.Model): + """Contains information about a node that was targeted by a user-induced operation. :param node_name: The name of a Service Fabric node. 
:type node_name: str @@ -13758,41 +15132,45 @@ class NodeResult(Model): 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeResult, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.node_instance_id = kwargs.get('node_instance_id', None) class NodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for nodes, containing health evaluations for - each unhealthy node that impacted current aggregated health state. Can be - returned when evaluating cluster health and the aggregated health state is - either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of - unhealthy nodes from the ClusterHealthPolicy. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the + ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes found in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -13800,36 +15178,66 @@ class NodesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodesHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Nodes' # type: str self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Nodes' -class NodeTransitionProgress(Model): - """Information about an NodeTransition operation. This class contains an - OperationState and a NodeTransitionResult. The NodeTransitionResult is not - valid until OperationState - is Completed or Faulted. +class NodeTagsDescription(msrest.serialization.Model): + """Describes the tags required for placement or running of the service. + + All required parameters must be populated in order to send to Azure. + + :param count: Required. The number of tags. + :type count: int + :param tags: Required. A set of tags. Array of size specified by the ‘Count’ parameter, for the + placement tags of the service. 
+ :type tags: list[str] + """ + + _validation = { + 'count': {'required': True}, + 'tags': {'required': True}, + } + + _attribute_map = { + 'count': {'key': 'Count', 'type': 'int'}, + 'tags': {'key': 'Tags', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(NodeTagsDescription, self).__init__(**kwargs) + self.count = kwargs['count'] + self.tags = kwargs['tags'] + - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' +class NodeTransitionProgress(msrest.serialization.Model): + """Information about an NodeTransition operation. This class contains an OperationState and a NodeTransitionResult. The NodeTransitionResult is not valid until OperationState +is Completed or Faulted. + + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param node_transition_result: Represents information about an operation - in a terminal state (Completed or Faulted). - :type node_transition_result: - ~azure.servicefabric.models.NodeTransitionResult + :param node_transition_result: Represents information about an operation in a terminal state + (Completed or Faulted). + :type node_transition_result: ~azure.servicefabric.models.NodeTransitionResult """ _attribute_map = { @@ -13837,21 +15245,23 @@ class NodeTransitionProgress(Model): 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeTransitionProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.node_transition_result = kwargs.get('node_transition_result', None) -class NodeTransitionResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). 
+class NodeTransitionResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param node_result: Contains information about a node that was targeted by - a user-induced operation. + :param node_result: Contains information about a node that was targeted by a user-induced + operation. :type node_result: ~azure.servicefabric.models.NodeResult """ @@ -13860,76 +15270,198 @@ class NodeTransitionResult(Model): 'node_result': {'key': 'NodeResult', 'type': 'NodeResult'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeTransitionResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.node_result = kwargs.get('node_result', None) +class NodeTypeHealthPolicyMapItem(msrest.serialization.Model): + """Defines an item in NodeTypeHealthPolicyMap. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key of the node type health policy map item. This is the name of the + node type. + :type key: str + :param value: Required. The value of the node type health policy map item. + If the percentage is respected but there is at least one unhealthy node in the node type, the + health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes over the total number + of nodes in the node type. + The computation rounds up to tolerate one failure on small numbers of nodes. + The max percent unhealthy nodes allowed for the node type. Must be between zero and 100. 
+ :type value: int + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(NodeTypeHealthPolicyMapItem, self).__init__(**kwargs) + self.key = kwargs['key'] + self.value = kwargs['value'] + + +class NodeTypeNodesHealthEvaluation(HealthEvaluation): + """Represents health evaluation for nodes of a particular node type. The node type nodes evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy node of the included node type that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. + :type description: str + :param node_type_name: The node type name as defined in the cluster manifest. + :type node_type_name: str + :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes for the node + type, specified as an entry in NodeTypeHealthPolicyMap. + :type max_percent_unhealthy_nodes: int + :param total_count: Total number of nodes of the node type found in the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation of this node type that impacted the + aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'node_type_name': {'key': 'NodeTypeName', 'type': 'str'}, + 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__( + self, + **kwargs + ): + super(NodeTypeNodesHealthEvaluation, self).__init__(**kwargs) + self.kind = 'NodeTypeNodes' # type: str + self.node_type_name = kwargs.get('node_type_name', None) + self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + + class NodeUpEvent(NodeEvent): """Node Up event. 
All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", 
"ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_down_at: Required. Time when Node was last down. - :type last_node_down_at: datetime + :type last_node_down_at: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_down_at': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_down_at': {'key': 'LastNodeDownAt', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeUpEvent, self).__init__(**kwargs) - self.node_instance = kwargs.get('node_instance', None) - self.last_node_down_at = kwargs.get('last_node_down_at', None) - 
self.kind = 'NodeUp' + self.kind = 'NodeUp' # type: str + self.node_instance = kwargs['node_instance'] + self.last_node_down_at = kwargs['last_node_down_at'] -class NodeUpgradeProgressInfo(Model): +class NodeUpgradeProgressInfo(msrest.serialization.Model): """Information about the upgrading node and its status. :param node_name: The name of a Service Fabric node. :type node_name: str - :param upgrade_phase: The state of the upgrading node. Possible values - include: 'Invalid', 'PreUpgradeSafetyCheck', 'Upgrading', - 'PostUpgradeSafetyCheck' + :param upgrade_phase: The state of the upgrading node. Possible values include: "Invalid", + "PreUpgradeSafetyCheck", "Upgrading", "PostUpgradeSafetyCheck". :type upgrade_phase: str or ~azure.servicefabric.models.NodeUpgradePhase - :param pending_safety_checks: List of pending safety checks - :type pending_safety_checks: - list[~azure.servicefabric.models.SafetyCheckWrapper] + :param pending_safety_checks: List of pending safety checks. + :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -13938,27 +15470,27 @@ class NodeUpgradeProgressInfo(Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(NodeUpgradeProgressInfo, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.upgrade_phase = kwargs.get('upgrade_phase', None) self.pending_safety_checks = kwargs.get('pending_safety_checks', None) -class OperationStatus(Model): - """Contains the OperationId, OperationState, and OperationType for - user-induced operations. +class OperationStatus(msrest.serialization.Model): + """Contains the OperationId, OperationState, and OperationType for user-induced operations. - :param operation_id: A GUID that identifies a call to this API. This is - also passed into the corresponding GetProgress API. 
+ :param operation_id: A GUID that identifies a call to this API. This is also passed into the + corresponding GetProgress API. :type operation_id: str - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param type: The type of the operation. Possible values include: - 'Invalid', 'PartitionDataLoss', 'PartitionQuorumLoss', 'PartitionRestart', - 'NodeTransition' + :param type: The type of the operation. Possible values include: "Invalid", + "PartitionDataLoss", "PartitionQuorumLoss", "PartitionRestart", "NodeTransition". :type type: str or ~azure.servicefabric.models.OperationType """ @@ -13968,25 +15500,26 @@ class OperationStatus(Model): 'type': {'key': 'Type', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(OperationStatus, self).__init__(**kwargs) self.operation_id = kwargs.get('operation_id', None) self.state = kwargs.get('state', None) self.type = kwargs.get('type', None) -class PackageSharingPolicyInfo(Model): +class PackageSharingPolicyInfo(msrest.serialization.Model): """Represents a policy for the package sharing. - :param shared_package_name: The name of code, configuration or data - package that should be shared. + :param shared_package_name: The name of code, configuration or data package that should be + shared. :type shared_package_name: str - :param package_sharing_scope: Represents the scope for - PackageSharingPolicy. This is specified during DeployServicePackageToNode - operation. 
Possible values include: 'None', 'All', 'Code', 'Config', - 'Data' - :type package_sharing_scope: str or - ~azure.servicefabric.models.PackageSharingPolicyScope + :param package_sharing_scope: Represents the scope for PackageSharingPolicy. This is specified + during DeployServicePackageToNode operation. Possible values include: "None", "All", "Code", + "Config", "Data". + :type package_sharing_scope: str or ~azure.servicefabric.models.PackageSharingPolicyScope """ _attribute_map = { @@ -13994,24 +15527,23 @@ class PackageSharingPolicyInfo(Model): 'package_sharing_scope': {'key': 'PackageSharingScope', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PackageSharingPolicyInfo, self).__init__(**kwargs) self.shared_package_name = kwargs.get('shared_package_name', None) self.package_sharing_scope = kwargs.get('package_sharing_scope', None) -class PagedApplicationInfoList(Model): - """The list of applications in the cluster. The list is paged when all of the - results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedApplicationInfoList(msrest.serialization.Model): + """The list of applications in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. 
The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of application information. :type items: list[~azure.servicefabric.models.ApplicationInfo] @@ -14022,28 +15554,26 @@ class PagedApplicationInfoList(Model): 'items': {'key': 'Items', 'type': '[ApplicationInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedApplicationResourceDescriptionList(Model): - """The list of application resources. The list is paged when all of the - results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedApplicationResourceDescriptionList(msrest.serialization.Model): + """The list of application resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. 
The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: - list[~azure.servicefabric.models.ApplicationResourceDescription] + :type items: list[~azure.servicefabric.models.ApplicationResourceDescription] """ _attribute_map = { @@ -14051,24 +15581,23 @@ class PagedApplicationResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[ApplicationResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedApplicationResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedApplicationTypeInfoList(Model): - """The list of application types that are provisioned or being provisioned in - the cluster. The list is paged when all of the results cannot fit in a - single message. The next set of results can be obtained by executing the - same query with the continuation token provided in this list. +class PagedApplicationTypeInfoList(msrest.serialization.Model): + """The list of application types that are provisioned or being provisioned in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of application type information. :type items: list[~azure.servicefabric.models.ApplicationTypeInfo] @@ -14079,24 +15608,23 @@ class PagedApplicationTypeInfoList(Model): 'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedApplicationTypeInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupConfigurationInfoList(Model): - """The list of backup configuration information. The list is paged when all of - the results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedBackupConfigurationInfoList(msrest.serialization.Model): + """The list of backup configuration information. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of backup configuration information. :type items: list[~azure.servicefabric.models.BackupConfigurationInfo] @@ -14107,24 +15635,23 @@ class PagedBackupConfigurationInfoList(Model): 'items': {'key': 'Items', 'type': '[BackupConfigurationInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedBackupConfigurationInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupEntityList(Model): - """The list of backup entities that are being periodically backed. The list is - paged when all of the results cannot fit in a single message. The next set - of results can be obtained by executing the same query with the - continuation token provided in this list. +class PagedBackupEntityList(msrest.serialization.Model): + """The list of backup entities that are being periodically backed. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of backup entity information. :type items: list[~azure.servicefabric.models.BackupEntity] @@ -14135,23 +15662,23 @@ class PagedBackupEntityList(Model): 'items': {'key': 'Items', 'type': '[BackupEntity]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedBackupEntityList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupInfoList(Model): - """The list of backups. The list is paged when all of the results cannot fit - in a single message. The next set of results can be obtained by executing - the same query with the continuation token provided in this list. +class PagedBackupInfoList(msrest.serialization.Model): + """The list of backups. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. 
+ :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of backup information. :type items: list[~azure.servicefabric.models.BackupInfo] @@ -14162,24 +15689,23 @@ class PagedBackupInfoList(Model): 'items': {'key': 'Items', 'type': '[BackupInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedBackupInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupPolicyDescriptionList(Model): - """The list of backup policies configured in the cluster. The list is paged - when all of the results cannot fit in a single message. The next set of - results can be obtained by executing the same query with the continuation - token provided in this list. +class PagedBackupPolicyDescriptionList(msrest.serialization.Model): + """The list of backup policies configured in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. 
+ :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: The list of backup policies information. :type items: list[~azure.servicefabric.models.BackupPolicyDescription] @@ -14190,24 +15716,23 @@ class PagedBackupPolicyDescriptionList(Model): 'items': {'key': 'Items', 'type': '[BackupPolicyDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedBackupPolicyDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedComposeDeploymentStatusInfoList(Model): - """The list of compose deployments in the cluster. The list is paged when all - of the results cannot fit in a single message. The next set of results can - be obtained by executing the same query with the continuation token - provided in this list. +class PagedComposeDeploymentStatusInfoList(msrest.serialization.Model): + """The list of compose deployments in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. 
+ :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of compose deployment status information. :type items: list[~azure.servicefabric.models.ComposeDeploymentStatusInfo] @@ -14218,25 +15743,25 @@ class PagedComposeDeploymentStatusInfoList(Model): 'items': {'key': 'Items', 'type': '[ComposeDeploymentStatusInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedComposeDeploymentStatusInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedDeployedApplicationInfoList(Model): - """The list of deployed applications in activating, downloading, or active - states on a node. - The list is paged when all of the results cannot fit in a single message. - The next set of results can be obtained by executing the same query with - the continuation token provided in this list. +class PagedDeployedApplicationInfoList(msrest.serialization.Model): + """The list of deployed applications in activating, downloading, or active states on a node. +The list is paged when all of the results cannot fit in a single message. +The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of deployed application information. :type items: list[~azure.servicefabric.models.DeployedApplicationInfo] @@ -14247,23 +15772,23 @@ class PagedDeployedApplicationInfoList(Model): 'items': {'key': 'Items', 'type': '[DeployedApplicationInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedDeployedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedGatewayResourceDescriptionList(Model): - """The list of gateway resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedGatewayResourceDescriptionList(msrest.serialization.Model): + """The list of gateway resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.GatewayResourceDescription] @@ -14274,23 +15799,23 @@ class PagedGatewayResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[GatewayResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedGatewayResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedNetworkResourceDescriptionList(Model): - """The list of network resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedNetworkResourceDescriptionList(msrest.serialization.Model): + """The list of network resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.NetworkResourceDescription] @@ -14301,23 +15826,23 @@ class PagedNetworkResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[NetworkResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedNetworkResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedNodeInfoList(Model): - """The list of nodes in the cluster. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedNodeInfoList(msrest.serialization.Model): + """The list of nodes in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of node information. :type items: list[~azure.servicefabric.models.NodeInfo] @@ -14328,28 +15853,26 @@ class PagedNodeInfoList(Model): 'items': {'key': 'Items', 'type': '[NodeInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedNodeInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedPropertyInfoList(Model): - """The paged list of Service Fabric properties under a given name. The list is - paged when all of the results cannot fit in a single message. The next set - of results can be obtained by executing the same query with the - continuation token provided in this list. +class PagedPropertyInfoList(msrest.serialization.Model): + """The paged list of Service Fabric properties under a given name. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any property under the given name - has been modified during the enumeration. If there was a modification, - this property value is false. + :param is_consistent: Indicates whether any property under the given name has been modified + during the enumeration. If there was a modification, this property value is false. :type is_consistent: bool :param properties: List of property information. :type properties: list[~azure.servicefabric.models.PropertyInfo] @@ -14361,25 +15884,24 @@ class PagedPropertyInfoList(Model): 'properties': {'key': 'Properties', 'type': '[PropertyInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedPropertyInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.is_consistent = kwargs.get('is_consistent', None) self.properties = kwargs.get('properties', None) -class PagedReplicaInfoList(Model): - """The list of replicas in the cluster for a given partition. The list is - paged when all of the results cannot fit in a single message. The next set - of results can be obtained by executing the same query with the - continuation token provided in this list. +class PagedReplicaInfoList(msrest.serialization.Model): + """The list of replicas in the cluster for a given partition. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of replica information. :type items: list[~azure.servicefabric.models.ReplicaInfo] @@ -14390,23 +15912,23 @@ class PagedReplicaInfoList(Model): 'items': {'key': 'Items', 'type': '[ReplicaInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedReplicaInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedSecretResourceDescriptionList(Model): - """The list of secret resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedSecretResourceDescriptionList(msrest.serialization.Model): + """The list of secret resources. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.SecretResourceDescription] @@ -14417,28 +15939,26 @@ class PagedSecretResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[SecretResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedSecretResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedSecretValueResourceDescriptionList(Model): - """The list of values of a secret resource, paged if the number of results - exceeds the limits of a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in the previous page. +class PagedSecretValueResourceDescriptionList(msrest.serialization.Model): + """The list of values of a secret resource, paged if the number of results exceeds the limits of a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in the previous page. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: - list[~azure.servicefabric.models.SecretValueResourceDescription] + :type items: list[~azure.servicefabric.models.SecretValueResourceDescription] """ _attribute_map = { @@ -14446,24 +15966,23 @@ class PagedSecretValueResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[SecretValueResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedSecretValueResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServiceInfoList(Model): - """The list of services in the cluster for an application. The list is paged - when all of the results cannot fit in a single message. The next set of - results can be obtained by executing the same query with the continuation - token provided in this list. 
+class PagedServiceInfoList(msrest.serialization.Model): + """The list of services in the cluster for an application. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of service information. :type items: list[~azure.servicefabric.models.ServiceInfo] @@ -14474,24 +15993,23 @@ class PagedServiceInfoList(Model): 'items': {'key': 'Items', 'type': '[ServiceInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedServiceInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServicePartitionInfoList(Model): - """The list of partition in the cluster for a service. The list is paged when - all of the results cannot fit in a single message. The next set of results - can be obtained by executing the same query with the continuation token - provided in this list. 
+class PagedServicePartitionInfoList(msrest.serialization.Model): + """The list of partition in the cluster for a service. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of service partition information. :type items: list[~azure.servicefabric.models.ServicePartitionInfo] @@ -14502,24 +16020,23 @@ class PagedServicePartitionInfoList(Model): 'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedServicePartitionInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServiceReplicaDescriptionList(Model): - """The list of service resource replicas in the cluster. The list is paged - when all of the results cannot fit in a single message. The next set of - results can be obtained by executing the same query with the continuation - token provided in this list. 
+class PagedServiceReplicaDescriptionList(msrest.serialization.Model): + """The list of service resource replicas in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of service resource replica description. :type items: list[~azure.servicefabric.models.ServiceReplicaDescription] @@ -14530,23 +16047,23 @@ class PagedServiceReplicaDescriptionList(Model): 'items': {'key': 'Items', 'type': '[ServiceReplicaDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedServiceReplicaDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServiceResourceDescriptionList(Model): - """The list of service resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. 
+class PagedServiceResourceDescriptionList(msrest.serialization.Model): + """The list of service resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.ServiceResourceDescription] @@ -14557,28 +16074,26 @@ class PagedServiceResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[ServiceResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedServiceResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedSubNameInfoList(Model): - """A paged list of Service Fabric names. The list is paged when all of the - results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. 
+class PagedSubNameInfoList(msrest.serialization.Model): + """A paged list of Service Fabric names. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any name under the given name has - been modified during the enumeration. If there was a modification, this - property value is false. + :param is_consistent: Indicates whether any name under the given name has been modified during + the enumeration. If there was a modification, this property value is false. :type is_consistent: bool :param sub_names: List of the child names. 
:type sub_names: list[str] @@ -14590,25 +16105,24 @@ class PagedSubNameInfoList(Model): 'sub_names': {'key': 'SubNames', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedSubNameInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.is_consistent = kwargs.get('is_consistent', None) self.sub_names = kwargs.get('sub_names', None) -class PagedUpdatePartitionLoadResultList(Model): - """The list of results of the call UpdatePartitionLoad. The list is paged when - all of the results cannot fit in a single message. The next set of results - can be obtained by executing the same query with the continuation token - provided in this list. +class PagedUpdatePartitionLoadResultList(msrest.serialization.Model): + """The list of results of the call UpdatePartitionLoad. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of partition load update information. 
:type items: list[~azure.servicefabric.models.UpdatePartitionLoadResult] @@ -14619,23 +16133,23 @@ class PagedUpdatePartitionLoadResultList(Model): 'items': {'key': 'Items', 'type': '[UpdatePartitionLoadResult]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedUpdatePartitionLoadResultList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedVolumeResourceDescriptionList(Model): - """The list of volume resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedVolumeResourceDescriptionList(msrest.serialization.Model): + """The list of volume resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. 
:type items: list[~azure.servicefabric.models.VolumeResourceDescription] @@ -14646,7 +16160,10 @@ class PagedVolumeResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[VolumeResourceDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PagedVolumeResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) @@ -14656,46 +16173,65 @@ class PartitionAnalysisEvent(PartitionEvent): """Represents the base for all Partition Analysis Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionPrimaryMoveAnalysisEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: PartitionPrimaryMoveAnalysisEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. 
:type metadata: ~azure.servicefabric.models.AnalysisEventMetadata """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, } @@ -14704,32 +16240,33 @@ class PartitionAnalysisEvent(PartitionEvent): 'kind': {'PartitionPrimaryMoveAnalysis': 'PartitionPrimaryMoveAnalysisEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionAnalysisEvent, self).__init__(**kwargs) - self.metadata = kwargs.get('metadata', None) - self.kind = 'PartitionAnalysisEvent' + self.kind = 'PartitionAnalysisEvent' # type: str + self.metadata = kwargs['metadata'] class PartitionBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information, for a specific partition, specifying what - backup policy is being applied and suspend description, if any. + """Backup configuration information, for a specific partition, specifying what backup policy is being applied and suspend description, if any. All required parameters must be populated in order to send to Azure. - :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. 
Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. - :type kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. 
:type partition_id: str @@ -14740,19 +16277,22 @@ class PartitionBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionBackupConfigurationInfo, self).__init__(**kwargs) + self.kind = 'Partition' # type: str self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) - self.kind = 'Partition' class PartitionBackupEntity(BackupEntity): @@ -14760,10 +16300,11 @@ class PartitionBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. 
:type partition_id: str @@ -14779,24 +16320,25 @@ class PartitionBackupEntity(BackupEntity): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionBackupEntity, self).__init__(**kwargs) + self.entity_kind = 'Partition' # type: str self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) - self.entity_kind = 'Partition' -class PartitionDataLossProgress(Model): +class PartitionDataLossProgress(msrest.serialization.Model): """Information about a partition data loss user-induced operation. - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_data_loss_result: Represents information about an operation - in a terminal state (Completed or Faulted). - :type invoke_data_loss_result: - ~azure.servicefabric.models.InvokeDataLossResult + :param invoke_data_loss_result: Represents information about an operation in a terminal state + (Completed or Faulted). 
+ :type invoke_data_loss_result: ~azure.servicefabric.models.InvokeDataLossResult """ _attribute_map = { @@ -14804,7 +16346,10 @@ class PartitionDataLossProgress(Model): 'invoke_data_loss_result': {'key': 'InvokeDataLossResult', 'type': 'InvokeDataLossResult'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionDataLossProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.invoke_data_loss_result = kwargs.get('invoke_data_loss_result', None) @@ -14813,30 +16358,25 @@ def __init__(self, **kwargs): class PartitionHealth(EntityHealth): """Information about the health of a Service Fabric partition. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param partition_id: ID of the partition whose health information is - described by this object. + :param partition_id: ID of the partition whose health information is described by this object. :type partition_id: str - :param replica_health_states: The list of replica health states associated - with the partition. - :type replica_health_states: - list[~azure.servicefabric.models.ReplicaHealthState] + :param replica_health_states: The list of replica health states associated with the partition. + :type replica_health_states: list[~azure.servicefabric.models.ReplicaHealthState] """ _attribute_map = { @@ -14848,40 +16388,43 @@ class PartitionHealth(EntityHealth): 'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealth, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.replica_health_states = kwargs.get('replica_health_states', None) class PartitionHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a partition, containing information about - the data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. 
- - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a partition, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. 
:type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition whose health evaluation is - described by this object. + :param partition_id: Id of the partition whose health evaluation is described by this object. :type partition_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the partition. The types of the - unhealthy evaluations can be ReplicasHealthEvaluation or - EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the partition. The types of the unhealthy evaluations can be + ReplicasHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -14889,18 +16432,21 @@ class PartitionHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Partition' # type: str self.partition_id = kwargs.get('partition_id', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Partition' class PartitionHealthReportExpiredEvent(PartitionEvent): @@ -14908,23 +16454,42 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. 
The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
+ :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -14938,17 +16503,16 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -14961,11 +16525,11 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -14977,31 +16541,30 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealthReportExpiredEvent, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'PartitionHealthReportExpired' + self.kind = 'PartitionHealthReportExpired' # type: str + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = 
kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class PartitionHealthState(EntityHealthState): - """Represents the health state of a partition, which contains the partition - identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: Id of the partition whose health state is described - by this object. + """Represents the health state of a partition, which contains the partition identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param partition_id: Id of the partition whose health state is described by this object. :type partition_id: str """ @@ -15010,27 +16573,26 @@ class PartitionHealthState(EntityHealthState): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealthState, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) class PartitionHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a partition, which contains the - partition ID, its aggregated health state and any replicas that respect the - filters in the cluster health chunk query description. 
+ """Represents the health state chunk of a partition, which contains the partition ID, its aggregated health state and any replicas that respect the filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param partition_id: The Id of the partition. :type partition_id: str - :param replica_health_state_chunks: The list of replica health state - chunks belonging to the partition that respect the filters in the cluster - health chunk query description. - :type replica_health_state_chunks: - ~azure.servicefabric.models.ReplicaHealthStateChunkList + :param replica_health_state_chunks: The list of replica health state chunks belonging to the + partition that respect the filters in the cluster health chunk query description. + :type replica_health_state_chunks: ~azure.servicefabric.models.ReplicaHealthStateChunkList """ _attribute_map = { @@ -15039,20 +16601,21 @@ class PartitionHealthStateChunk(EntityHealthStateChunk): 'replica_health_state_chunks': {'key': 'ReplicaHealthStateChunks', 'type': 'ReplicaHealthStateChunkList'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealthStateChunk, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.replica_health_state_chunks = kwargs.get('replica_health_state_chunks', None) -class PartitionHealthStateChunkList(Model): - """The list of partition health state chunks that respect the input filters in - the chunk query description. 
- Returned by get cluster health state chunks query as part of the parent - application hierarchy. +class PartitionHealthStateChunkList(msrest.serialization.Model): + """The list of partition health state chunks that respect the input filters in the chunk query description. +Returned by get cluster health state chunks query as part of the parent application hierarchy. - :param items: The list of partition health state chunks that respect the - input filters in the chunk query. + :param items: The list of partition health state chunks that respect the input filters in the + chunk query. :type items: list[~azure.servicefabric.models.PartitionHealthStateChunk] """ @@ -15060,68 +16623,58 @@ class PartitionHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[PartitionHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class PartitionHealthStateFilter(Model): - """Defines matching criteria to determine whether a partition should be - included as a child of a service in the cluster health chunk. - The partitions are only returned if the parent entities match a filter - specified in the cluster health chunk query description. The parent service - and application must be included in the cluster health chunk. - One filter can match zero, one or multiple partitions, depending on its - properties. - - :param partition_id_filter: ID of the partition that matches the filter. - The filter is applied only to the specified partition, if it exists. - If the partition doesn't exist, no partition is returned in the cluster - health chunk based on this filter. - If the partition exists, it is included in the cluster health chunk if it - respects the other filter properties. 
- If not specified, all partitions that match the parent filters (if any) - are taken into consideration and matched against the other filter members, - like health state filter. +class PartitionHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a partition should be included as a child of a service in the cluster health chunk. +The partitions are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent service and application must be included in the cluster health chunk. +One filter can match zero, one or multiple partitions, depending on its properties. + + :param partition_id_filter: ID of the partition that matches the filter. The filter is applied + only to the specified partition, if it exists. + If the partition doesn't exist, no partition is returned in the cluster health chunk based on + this filter. + If the partition exists, it is included in the cluster health chunk if it respects the other + filter properties. + If not specified, all partitions that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type partition_id_filter: str - :param health_state_filter: The filter for the health state of the - partitions. It allows selecting partitions if they match the desired - health states. - The possible values are integer value of one of the following health - states. Only partitions that match the filter are returned. All partitions - are used to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the partition ID is - specified. If the filter has default value and partition ID is specified, - the matching partition is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. 
- For example, if the provided value is 6, it matches partitions with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the partitions. It allows + selecting partitions if they match the desired health states. + The possible values are integer value of one of the following health states. Only partitions + that match the filter are returned. All partitions are used to evaluate the cluster aggregated + health state. + If not specified, default value is None, unless the partition ID is specified. If the filter + has default value and partition ID is specified, the matching partition is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches partitions with HealthState value of OK + (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. 
+ * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param replica_filters: Defines a list of filters that specify which - replicas to be included in the returned cluster health chunk as children - of the parent partition. The replicas are returned only if the parent - partition matches a filter. - If the list is empty, no replicas are returned. All the replicas are used - to evaluate the parent partition aggregated health state, regardless of - the input filters. + :param replica_filters: Defines a list of filters that specify which replicas to be included in + the returned cluster health chunk as children of the parent partition. The replicas are + returned only if the parent partition matches a filter. + If the list is empty, no replicas are returned. All the replicas are used to evaluate the + parent partition aggregated health state, regardless of the input filters. The partition filter may specify multiple replica filters. - For example, it can specify a filter to return all replicas with health - state Error and another filter to always include a replica identified by - its replica id. - :type replica_filters: - list[~azure.servicefabric.models.ReplicaHealthStateFilter] + For example, it can specify a filter to return all replicas with health state Error and + another filter to always include a replica identified by its replica id. 
+ :type replica_filters: list[~azure.servicefabric.models.ReplicaHealthStateFilter] """ _attribute_map = { @@ -15130,7 +16683,10 @@ class PartitionHealthStateFilter(Model): 'replica_filters': {'key': 'ReplicaFilters', 'type': '[ReplicaHealthStateFilter]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionHealthStateFilter, self).__init__(**kwargs) self.partition_id_filter = kwargs.get('partition_id_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) @@ -15138,21 +16694,20 @@ def __init__(self, **kwargs): class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing instances of - stateless service partition. + """Represents a scaling mechanism for adding or removing instances of stateless service partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param min_instance_count: Required. Minimum number of instances of the - partition. + :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. + Possible values include: "Invalid", "PartitionInstanceCount", + "AddRemoveIncrementalNamedPartition". + :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind + :param min_instance_count: Required. Minimum number of instances of the partition. :type min_instance_count: int - :param max_instance_count: Required. Maximum number of instances of the - partition. + :param max_instance_count: Required. Maximum number of instances of the partition. :type max_instance_count: int - :param scale_increment: Required. The number of instances to add or remove - during a scaling operation. + :param scale_increment: Required. The number of instances to add or remove during a scaling + operation. 
:type scale_increment: int """ @@ -15170,32 +16725,31 @@ class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionInstanceCountScaleMechanism, self).__init__(**kwargs) - self.min_instance_count = kwargs.get('min_instance_count', None) - self.max_instance_count = kwargs.get('max_instance_count', None) - self.scale_increment = kwargs.get('scale_increment', None) - self.kind = 'PartitionInstanceCount' + self.kind = 'PartitionInstanceCount' # type: str + self.min_instance_count = kwargs['min_instance_count'] + self.max_instance_count = kwargs['max_instance_count'] + self.scale_increment = kwargs['scale_increment'] -class PartitionLoadInformation(Model): - """Represents load information for a partition, which contains the primary and - secondary reported load metrics. - In case there is no load reported, PartitionLoadInformation will contain - the default load for the service of the partition. - For default loads, LoadMetricReport's LastReportedUtc is set to 0. +class PartitionLoadInformation(msrest.serialization.Model): + """Represents load information for a partition, which contains the primary and secondary reported load metrics. +In case there is no load reported, PartitionLoadInformation will contain the default load for the service of the partition. +For default loads, LoadMetricReport's LastReportedUtc is set to 0. :param partition_id: Id of the partition. :type partition_id: str - :param primary_load_metric_reports: Array of load reports from the primary - replica for this partition. - :type primary_load_metric_reports: - list[~azure.servicefabric.models.LoadMetricReport] - :param secondary_load_metric_reports: Array of aggregated load reports - from all secondary replicas for this partition. + :param primary_load_metric_reports: Array of load reports from the primary replica for this + partition. 
+ :type primary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] + :param secondary_load_metric_reports: Array of aggregated load reports from all secondary + replicas for this partition. Array only contains the latest reported load for each metric. - :type secondary_load_metric_reports: - list[~azure.servicefabric.models.LoadMetricReport] + :type secondary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] """ _attribute_map = { @@ -15204,31 +16758,30 @@ class PartitionLoadInformation(Model): 'secondary_load_metric_reports': {'key': 'SecondaryLoadMetricReports', 'type': '[LoadMetricReport]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionLoadInformation, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.primary_load_metric_reports = kwargs.get('primary_load_metric_reports', None) self.secondary_load_metric_reports = kwargs.get('secondary_load_metric_reports', None) -class PartitionMetricLoadDescription(Model): - """Represents load information for a partition, which contains the metrics - load information about primary, all secondary replicas/instances or a - specific secondary replica/instance located on a specific node. +class PartitionMetricLoadDescription(msrest.serialization.Model): + """Represents load information for a partition, which contains the metrics load information about primary, all secondary replicas/instances or a specific secondary replica/instance located on a specific node. :param partition_id: Id of the partition. :type partition_id: str - :param primary_replica_load_entries: Partition's load information for - primary replica, in case partition is from a stateful service. - :type primary_replica_load_entries: - list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replicas_or_instances_load_entries: Partition's load - information for all secondary replicas or instances. 
+ :param primary_replica_load_entries: Partition's load information for primary replica, in case + partition is from a stateful service. + :type primary_replica_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] + :param secondary_replicas_or_instances_load_entries: Partition's load information for all + secondary replicas or instances. :type secondary_replicas_or_instances_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replica_or_instance_load_entries_per_node: Partition's - load information for a specific secondary replica or instance located on a - specific node. + :param secondary_replica_or_instance_load_entries_per_node: Partition's load information for a + specific secondary replica or instance located on a specific node. :type secondary_replica_or_instance_load_entries_per_node: list[~azure.servicefabric.models.ReplicaMetricLoadDescription] """ @@ -15240,7 +16793,10 @@ class PartitionMetricLoadDescription(Model): 'secondary_replica_or_instance_load_entries_per_node': {'key': 'SecondaryReplicaOrInstanceLoadEntriesPerNode', 'type': '[ReplicaMetricLoadDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionMetricLoadDescription, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.primary_replica_load_entries = kwargs.get('primary_replica_load_entries', None) @@ -15253,23 +16809,42 @@ class PartitionNewHealthReportEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -15283,17 +16858,16 @@ class PartitionNewHealthReportEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -15306,11 +16880,11 @@ class PartitionNewHealthReportEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -15322,17 +16896,20 @@ class PartitionNewHealthReportEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionNewHealthReportEvent, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'PartitionNewHealthReport' + self.kind = 'PartitionNewHealthReport' # type: str + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + 
self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): @@ -15340,28 +16917,47 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata :param when_move_completed: Required. Time when the move was completed. 
- :type when_move_completed: datetime + :type when_move_completed: ~datetime.datetime :param previous_node: Required. The name of a Service Fabric node. :type previous_node: str :param current_node: Required. The name of a Service Fabric node. @@ -15373,9 +16969,9 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, 'when_move_completed': {'required': True}, @@ -15386,11 +16982,11 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, 'when_move_completed': {'key': 'WhenMoveCompleted', 'type': 'iso-8601'}, @@ -15400,27 +16996,28 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): 'relevant_traces': {'key': 'RelevantTraces', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionPrimaryMoveAnalysisEvent, self).__init__(**kwargs) - self.when_move_completed = kwargs.get('when_move_completed', None) - self.previous_node = kwargs.get('previous_node', None) - self.current_node = kwargs.get('current_node', None) - self.move_reason = kwargs.get('move_reason', None) - self.relevant_traces = kwargs.get('relevant_traces', None) - self.kind = 'PartitionPrimaryMoveAnalysis' + self.kind = 'PartitionPrimaryMoveAnalysis' # type: str + self.when_move_completed = kwargs['when_move_completed'] + self.previous_node 
= kwargs['previous_node'] + self.current_node = kwargs['current_node'] + self.move_reason = kwargs['move_reason'] + self.relevant_traces = kwargs['relevant_traces'] -class PartitionQuorumLossProgress(Model): +class PartitionQuorumLossProgress(msrest.serialization.Model): """Information about a partition quorum loss user-induced operation. - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_quorum_loss_result: Represents information about an - operation in a terminal state (Completed or Faulted). - :type invoke_quorum_loss_result: - ~azure.servicefabric.models.InvokeQuorumLossResult + :param invoke_quorum_loss_result: Represents information about an operation in a terminal state + (Completed or Faulted). + :type invoke_quorum_loss_result: ~azure.servicefabric.models.InvokeQuorumLossResult """ _attribute_map = { @@ -15428,7 +17025,10 @@ class PartitionQuorumLossProgress(Model): 'invoke_quorum_loss_result': {'key': 'InvokeQuorumLossResult', 'type': 'InvokeQuorumLossResult'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionQuorumLossProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.invoke_quorum_loss_result = kwargs.get('invoke_quorum_loss_result', None) @@ -15439,23 +17039,42 @@ class PartitionReconfiguredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str @@ -15486,9 +17105,9 @@ class PartitionReconfiguredEvent(PartitionEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, @@ -15506,11 +17125,11 @@ class PartitionReconfiguredEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, @@ -15527,35 +17146,36 @@ class PartitionReconfiguredEvent(PartitionEvent): 'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionReconfiguredEvent, self).__init__(**kwargs) - self.node_name = kwargs.get('node_name', None) - self.node_instance_id = kwargs.get('node_instance_id', None) - self.service_type = kwargs.get('service_type', None) - self.cc_epoch_data_loss_version = kwargs.get('cc_epoch_data_loss_version', None) - self.cc_epoch_config_version = kwargs.get('cc_epoch_config_version', None) - self.reconfig_type = kwargs.get('reconfig_type', None) - self.result = kwargs.get('result', None) - self.phase0_duration_ms = kwargs.get('phase0_duration_ms', None) - self.phase1_duration_ms = kwargs.get('phase1_duration_ms', None) - self.phase2_duration_ms = kwargs.get('phase2_duration_ms', None) - self.phase3_duration_ms = kwargs.get('phase3_duration_ms', None) - self.phase4_duration_ms = kwargs.get('phase4_duration_ms', None) - 
self.total_duration_ms = kwargs.get('total_duration_ms', None) - self.kind = 'PartitionReconfigured' - - -class PartitionRestartProgress(Model): + self.kind = 'PartitionReconfigured' # type: str + self.node_name = kwargs['node_name'] + self.node_instance_id = kwargs['node_instance_id'] + self.service_type = kwargs['service_type'] + self.cc_epoch_data_loss_version = kwargs['cc_epoch_data_loss_version'] + self.cc_epoch_config_version = kwargs['cc_epoch_config_version'] + self.reconfig_type = kwargs['reconfig_type'] + self.result = kwargs['result'] + self.phase0_duration_ms = kwargs['phase0_duration_ms'] + self.phase1_duration_ms = kwargs['phase1_duration_ms'] + self.phase2_duration_ms = kwargs['phase2_duration_ms'] + self.phase3_duration_ms = kwargs['phase3_duration_ms'] + self.phase4_duration_ms = kwargs['phase4_duration_ms'] + self.total_duration_ms = kwargs['total_duration_ms'] + + +class PartitionRestartProgress(msrest.serialization.Model): """Information about a partition restart user-induced operation. - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param restart_partition_result: Represents information about an operation - in a terminal state (Completed or Faulted). - :type restart_partition_result: - ~azure.servicefabric.models.RestartPartitionResult + :param restart_partition_result: Represents information about an operation in a terminal state + (Completed or Faulted). 
+ :type restart_partition_result: ~azure.servicefabric.models.RestartPartitionResult """ _attribute_map = { @@ -15563,43 +17183,46 @@ class PartitionRestartProgress(Model): 'restart_partition_result': {'key': 'RestartPartitionResult', 'type': 'RestartPartitionResult'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionRestartProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.restart_partition_result = kwargs.get('restart_partition_result', None) class PartitionsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the partitions of a service, containing - health evaluations for each unhealthy partition that impacts current - aggregated health state. Can be returned when evaluating service health and - the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for the partitions of a service, containing health evaluations for each unhealthy partition that impacts current aggregated health state. Can be returned when evaluating service health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. 
Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_partitions_per_service: Maximum allowed - percentage of unhealthy partitions per service from the - ServiceTypeHealthPolicy. + :param max_percent_unhealthy_partitions_per_service: Maximum allowed percentage of unhealthy + partitions per service from the ServiceTypeHealthPolicy. :type max_percent_unhealthy_partitions_per_service: int - :param total_count: Total number of partitions of the service from the - health store. + :param total_count: Total number of partitions of the service from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - PartitionHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy PartitionHealthEvaluation that impacted the aggregated + health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -15607,35 +17230,37 @@ class PartitionsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PartitionsHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Partitions' # type: str self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Partitions' -class ReplicatorStatus(Model): +class ReplicatorStatus(msrest.serialization.Model): """Represents a base class for primary or secondary replicator status. - Contains information about the service fabric replicator like the - replication/copy queue utilization, last acknowledgement received - timestamp, etc. +Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus + sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole """ _validation = { @@ -15650,27 +17275,27 @@ class ReplicatorStatus(Model): 'kind': {'Primary': 'PrimaryReplicatorStatus', 'SecondaryReplicatorStatus': 'SecondaryReplicatorStatus'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicatorStatus, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class PrimaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is - functioning in a Primary role. + """Provides statistics about the Service Fabric Replicator, when it is functioning in a Primary role. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the primary replicator. - :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param remote_replicators: The status of all the active and idle secondary - replicators that the primary is aware of. - :type remote_replicators: - list[~azure.servicefabric.models.RemoteReplicatorStatus] + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". 
+ :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the primary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param remote_replicators: The status of all the active and idle secondary replicators that the + primary is aware of. + :type remote_replicators: list[~azure.servicefabric.models.RemoteReplicatorStatus] """ _validation = { @@ -15683,30 +17308,31 @@ class PrimaryReplicatorStatus(ReplicatorStatus): 'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PrimaryReplicatorStatus, self).__init__(**kwargs) + self.kind = 'Primary' # type: str self.replication_queue_status = kwargs.get('replication_queue_status', None) self.remote_replicators = kwargs.get('remote_replicators', None) - self.kind = 'Primary' -class Probe(Model): +class Probe(msrest.serialization.Model): """Probes have a number of fields that you can use to control their behavior. - :param initial_delay_seconds: The initial delay in seconds to start - executing probe once codepackage has started. Default value: 0 . + :param initial_delay_seconds: The initial delay in seconds to start executing probe once + codepackage has started. :type initial_delay_seconds: int - :param period_seconds: Periodic seconds to execute probe. Default value: - 10 . + :param period_seconds: Periodic seconds to execute probe. :type period_seconds: int - :param timeout_seconds: Period after which probe is considered as failed - if it hasn't completed successfully. Default value: 1 . + :param timeout_seconds: Period after which probe is considered as failed if it hasn't completed + successfully. :type timeout_seconds: int - :param success_threshold: The count of successful probe executions after - which probe is considered success. Default value: 1 . 
+ :param success_threshold: The count of successful probe executions after which probe is + considered success. :type success_threshold: int - :param failure_threshold: The count of failures after which probe is - considered failed. Default value: 3 . + :param failure_threshold: The count of failures after which probe is considered failed. :type failure_threshold: int :param exec_property: Exec command to run inside the container. :type exec_property: ~azure.servicefabric.models.ProbeExec @@ -15727,7 +17353,10 @@ class Probe(Model): 'tcp_socket': {'key': 'tcpSocket', 'type': 'ProbeTcpSocket'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(Probe, self).__init__(**kwargs) self.initial_delay_seconds = kwargs.get('initial_delay_seconds', 0) self.period_seconds = kwargs.get('period_seconds', 10) @@ -15739,13 +17368,13 @@ def __init__(self, **kwargs): self.tcp_socket = kwargs.get('tcp_socket', None) -class ProbeExec(Model): +class ProbeExec(msrest.serialization.Model): """Exec command to run inside the container. All required parameters must be populated in order to send to Azure. - :param command: Required. Comma separated command to run inside the - container for example "sh, -c, echo hello world". + :param command: Required. Comma separated command to run inside the container for example "sh, + -c, echo hello world". :type command: str """ @@ -15757,12 +17386,15 @@ class ProbeExec(Model): 'command': {'key': 'command', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProbeExec, self).__init__(**kwargs) - self.command = kwargs.get('command', None) + self.command = kwargs['command'] -class ProbeHttpGet(Model): +class ProbeHttpGet(msrest.serialization.Model): """Http probe for the container. All required parameters must be populated in order to send to Azure. @@ -15775,8 +17407,8 @@ class ProbeHttpGet(Model): :type host: str :param http_headers: Headers to set in the request. 
:type http_headers: list[~azure.servicefabric.models.ProbeHttpGetHeaders] - :param scheme: Scheme for the http probe. Can be Http or Https. Possible - values include: 'http', 'https' + :param scheme: Scheme for the http probe. Can be Http or Https. Possible values include: + "http", "https". :type scheme: str or ~azure.servicefabric.models.Scheme """ @@ -15792,16 +17424,19 @@ class ProbeHttpGet(Model): 'scheme': {'key': 'scheme', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProbeHttpGet, self).__init__(**kwargs) - self.port = kwargs.get('port', None) + self.port = kwargs['port'] self.path = kwargs.get('path', None) self.host = kwargs.get('host', None) self.http_headers = kwargs.get('http_headers', None) self.scheme = kwargs.get('scheme', None) -class ProbeHttpGetHeaders(Model): +class ProbeHttpGetHeaders(msrest.serialization.Model): """Http headers. All required parameters must be populated in order to send to Azure. @@ -15822,13 +17457,16 @@ class ProbeHttpGetHeaders(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProbeHttpGetHeaders, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.value = kwargs.get('value', None) + self.name = kwargs['name'] + self.value = kwargs['value'] -class ProbeTcpSocket(Model): +class ProbeTcpSocket(msrest.serialization.Model): """Tcp port to probe inside the container. All required parameters must be populated in order to send to Azure. @@ -15845,14 +17483,16 @@ class ProbeTcpSocket(Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProbeTcpSocket, self).__init__(**kwargs) - self.port = kwargs.get('port', None) + self.port = kwargs['port'] -class PropertyBatchDescriptionList(Model): - """Describes a list of property batch operations to be executed. Either all or - none of the operations will be committed. 
+class PropertyBatchDescriptionList(msrest.serialization.Model): + """Describes a list of property batch operations to be executed. Either all or none of the operations will be committed. :param operations: A list of the property batch operations to be executed. :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] @@ -15862,20 +17502,23 @@ class PropertyBatchDescriptionList(Model): 'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyBatchDescriptionList, self).__init__(**kwargs) self.operations = kwargs.get('operations', None) -class PropertyDescription(Model): +class PropertyDescription(msrest.serialization.Model): """Description of a Service Fabric property. All required parameters must be populated in order to send to Azure. :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param custom_type_id: The property's custom type ID. Using this property, - the user is able to tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. Using this property, the user is able to + tag the type of the value of the property. :type custom_type_id: str :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue @@ -15892,14 +17535,17 @@ class PropertyDescription(Model): 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyDescription, self).__init__(**kwargs) - self.property_name = kwargs.get('property_name', None) + self.property_name = kwargs['property_name'] self.custom_type_id = kwargs.get('custom_type_id', None) - self.value = kwargs.get('value', None) + self.value = kwargs['value'] -class PropertyInfo(Model): +class PropertyInfo(msrest.serialization.Model): """Information about a Service Fabric property. 
All required parameters must be populated in order to send to Azure. @@ -15908,8 +17554,8 @@ class PropertyInfo(Model): :type name: str :param value: Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param metadata: Required. The metadata associated with a property, - including the property's name. + :param metadata: Required. The metadata associated with a property, including the property's + name. :type metadata: ~azure.servicefabric.models.PropertyMetadata """ @@ -15924,33 +17570,35 @@ class PropertyInfo(Model): 'metadata': {'key': 'Metadata', 'type': 'PropertyMetadata'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyInfo, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.value = kwargs.get('value', None) - self.metadata = kwargs.get('metadata', None) + self.metadata = kwargs['metadata'] -class PropertyMetadata(Model): +class PropertyMetadata(msrest.serialization.Model): """The metadata associated with a property, including the property's name. - :param type_id: The kind of property, determined by the type of data. - Following are the possible values. Possible values include: 'Invalid', - 'Binary', 'Int64', 'Double', 'String', 'Guid' + :param type_id: The kind of property, determined by the type of data. Following are the + possible values. Possible values include: "Invalid", "Binary", "Int64", "Double", "String", + "Guid". :type type_id: str or ~azure.servicefabric.models.PropertyValueKind :param custom_type_id: The property's custom type ID. :type custom_type_id: str - :param parent: The name of the parent Service Fabric Name for the - property. It could be thought of as the name-space/table under which the - property exists. + :param parent: The name of the parent Service Fabric Name for the property. It could be thought + of as the name-space/table under which the property exists. 
:type parent: str :param size_in_bytes: The length of the serialized property value. :type size_in_bytes: int - :param last_modified_utc_timestamp: Represents when the Property was last - modified. Only write operations will cause this field to be updated. - :type last_modified_utc_timestamp: datetime - :param sequence_number: The version of the property. Every time a property - is modified, its sequence number is increased. + :param last_modified_utc_timestamp: Represents when the Property was last modified. Only write + operations will cause this field to be updated. + :type last_modified_utc_timestamp: ~datetime.datetime + :param sequence_number: The version of the property. Every time a property is modified, its + sequence number is increased. :type sequence_number: str """ @@ -15963,7 +17611,10 @@ class PropertyMetadata(Model): 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PropertyMetadata, self).__init__(**kwargs) self.type_id = kwargs.get('type_id', None) self.custom_type_id = kwargs.get('custom_type_id', None) @@ -15974,52 +17625,54 @@ def __init__(self, **kwargs): class ProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to register or provision an application type using - an application package uploaded to the Service Fabric image store. + """Describes the operation to register or provision an application type using an application package uploaded to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param async_property: Required. Indicates whether or not provisioning - should occur asynchronously. When set to true, the provision operation - returns when the request is accepted by the system, and the provision - operation continues without any timeout limit. The default value is false. 
- For large application packages, we recommend setting the value to true. + :param kind: Required. The kind of application type registration or provision requested. The + application package can be registered or provisioned either from the image store or from an + external store. Following are the kinds of the application type provision.Constant filled by + server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". + :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind + :param async_property: Required. Indicates whether or not provisioning should occur + asynchronously. When set to true, the provision operation returns when the request is accepted + by the system, and the provision operation continues without any timeout limit. The default + value is false. For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_type_build_path: Required. The relative path for the - application package in the image store specified during the prior upload - operation. + :param application_type_build_path: Required. The relative path for the application package in + the image store specified during the prior upload operation. :type application_type_build_path: str - :param application_package_cleanup_policy: The kind of action that needs - to be taken for cleaning up the application package after successful - provision. Possible values include: 'Invalid', 'Default', 'Automatic', - 'Manual' + :param application_package_cleanup_policy: The kind of action that needs to be taken for + cleaning up the application package after successful provision. Possible values include: + "Invalid", "Default", "Automatic", "Manual". 
:type application_package_cleanup_policy: str or ~azure.servicefabric.models.ApplicationPackageCleanupPolicy """ _validation = { - 'async_property': {'required': True}, 'kind': {'required': True}, + 'async_property': {'required': True}, 'application_type_build_path': {'required': True}, } _attribute_map = { - 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, 'application_package_cleanup_policy': {'key': 'ApplicationPackageCleanupPolicy', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProvisionApplicationTypeDescription, self).__init__(**kwargs) - self.application_type_build_path = kwargs.get('application_type_build_path', None) + self.kind = 'ImageStorePath' # type: str + self.application_type_build_path = kwargs['application_type_build_path'] self.application_package_cleanup_policy = kwargs.get('application_package_cleanup_policy', None) - self.kind = 'ImageStorePath' -class ProvisionFabricDescription(Model): +class ProvisionFabricDescription(msrest.serialization.Model): """Describes the parameters for provisioning a cluster. :param code_file_path: The cluster code package file path. @@ -16033,7 +17686,10 @@ class ProvisionFabricDescription(Model): 'cluster_manifest_file_path': {'key': 'ClusterManifestFilePath', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ProvisionFabricDescription, self).__init__(**kwargs) self.code_file_path = kwargs.get('code_file_path', None) self.cluster_manifest_file_path = kwargs.get('cluster_manifest_file_path', None) @@ -16041,66 +17697,64 @@ def __init__(self, **kwargs): class PutPropertyBatchOperation(PropertyBatchOperation): """Puts the specified property under the specified name. 
- Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param custom_type_id: The property's custom type ID. Using this property, - the user is able to tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. Using this property, the user is able to + tag the type of the value of the property. 
:type custom_type_id: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(PutPropertyBatchOperation, self).__init__(**kwargs) - self.value = kwargs.get('value', None) + self.kind = 'Put' # type: str + self.value = kwargs['value'] self.custom_type_id = kwargs.get('custom_type_id', None) - self.kind = 'Put' - - -class ReconfigurationInformation(Model): - """Information about current reconfiguration like phase, type, previous - configuration role of replica and reconfiguration start date time. - - :param previous_configuration_role: Replica role before reconfiguration - started. Possible values include: 'Unknown', 'None', 'Primary', - 'IdleSecondary', 'ActiveSecondary' - :type previous_configuration_role: str or - ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_phase: Current phase of ongoing reconfiguration. If - no reconfiguration is taking place then this value will be "None". - Possible values include: 'Unknown', 'None', 'Phase0', 'Phase1', 'Phase2', - 'Phase3', 'Phase4', 'AbortPhaseZero' - :type reconfiguration_phase: str or - ~azure.servicefabric.models.ReconfigurationPhase - :param reconfiguration_type: Type of current ongoing reconfiguration. If - no reconfiguration is taking place then this value will be "None". - Possible values include: 'Unknown', 'SwapPrimary', 'Failover', 'Other' - :type reconfiguration_type: str or - ~azure.servicefabric.models.ReconfigurationType - :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing - reconfiguration. 
If no reconfiguration is taking place then this value - will be zero date-time. - :type reconfiguration_start_time_utc: datetime + + +class ReconfigurationInformation(msrest.serialization.Model): + """Information about current reconfiguration like phase, type, previous configuration role of replica and reconfiguration start date time. + + :param previous_configuration_role: Replica role before reconfiguration started. Possible + values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type previous_configuration_role: str or ~azure.servicefabric.models.ReplicaRole + :param reconfiguration_phase: Current phase of ongoing reconfiguration. If no reconfiguration + is taking place then this value will be "None". Possible values include: "Unknown", "None", + "Phase0", "Phase1", "Phase2", "Phase3", "Phase4", "AbortPhaseZero". + :type reconfiguration_phase: str or ~azure.servicefabric.models.ReconfigurationPhase + :param reconfiguration_type: Type of current ongoing reconfiguration. If no reconfiguration is + taking place then this value will be "None". Possible values include: "Unknown", "SwapPrimary", + "Failover", "Other". + :type reconfiguration_type: str or ~azure.servicefabric.models.ReconfigurationType + :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing reconfiguration. If + no reconfiguration is taking place then this value will be zero date-time. 
+ :type reconfiguration_start_time_utc: ~datetime.datetime """ _attribute_map = { @@ -16110,7 +17764,10 @@ class ReconfigurationInformation(Model): 'reconfiguration_start_time_utc': {'key': 'ReconfigurationStartTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReconfigurationInformation, self).__init__(**kwargs) self.previous_configuration_role = kwargs.get('previous_configuration_role', None) self.reconfiguration_phase = kwargs.get('reconfiguration_phase', None) @@ -16118,16 +17775,14 @@ def __init__(self, **kwargs): self.reconfiguration_start_time_utc = kwargs.get('reconfiguration_start_time_utc', None) -class RegistryCredential(Model): +class RegistryCredential(msrest.serialization.Model): """Credential information to connect to container registry. :param registry_user_name: The user name to connect to container registry. :type registry_user_name: str - :param registry_password: The password for supplied username to connect to - container registry. + :param registry_password: The password for supplied username to connect to container registry. :type registry_password: str - :param password_encrypted: Indicates that supplied container registry - password is encrypted. + :param password_encrypted: Indicates that supplied container registry password is encrypted. :type password_encrypted: bool """ @@ -16137,25 +17792,27 @@ class RegistryCredential(Model): 'password_encrypted': {'key': 'PasswordEncrypted', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RegistryCredential, self).__init__(**kwargs) self.registry_user_name = kwargs.get('registry_user_name', None) self.registry_password = kwargs.get('registry_password', None) self.password_encrypted = kwargs.get('password_encrypted', None) -class ReliableCollectionsRef(Model): +class ReliableCollectionsRef(msrest.serialization.Model): """Specifying this parameter adds support for reliable collections. 
All required parameters must be populated in order to send to Azure. - :param name: Required. Name of ReliableCollection resource. Right now it's - not used and you can use any string. + :param name: Required. Name of ReliableCollection resource. Right now it's not used and you can + use any string. :type name: str - :param do_not_persist_state: False (the default) if ReliableCollections - state is persisted to disk as usual. True if you do not want to persist - state, in which case replication is still enabled and you can use - ReliableCollections as distributed cache. + :param do_not_persist_state: False (the default) if ReliableCollections state is persisted to + disk as usual. True if you do not want to persist state, in which case replication is still + enabled and you can use ReliableCollections as distributed cache. :type do_not_persist_state: bool """ @@ -16168,28 +17825,29 @@ class ReliableCollectionsRef(Model): 'do_not_persist_state': {'key': 'doNotPersistState', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReliableCollectionsRef, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.do_not_persist_state = kwargs.get('do_not_persist_state', None) -class RemoteReplicatorAcknowledgementDetail(Model): - """Provides various statistics of the acknowledgements that are being received - from the remote replicator. +class RemoteReplicatorAcknowledgementDetail(msrest.serialization.Model): + """Provides various statistics of the acknowledgements that are being received from the remote replicator. - :param average_receive_duration: Represents the average duration it takes - for the remote replicator to receive an operation. + :param average_receive_duration: Represents the average duration it takes for the remote + replicator to receive an operation. 
:type average_receive_duration: str - :param average_apply_duration: Represents the average duration it takes - for the remote replicator to apply an operation. This usually entails - writing the operation to disk. + :param average_apply_duration: Represents the average duration it takes for the remote + replicator to apply an operation. This usually entails writing the operation to disk. :type average_apply_duration: str - :param not_received_count: Represents the number of operations not yet - received by a remote replicator. + :param not_received_count: Represents the number of operations not yet received by a remote + replicator. :type not_received_count: str - :param received_and_not_applied_count: Represents the number of operations - received and not yet applied by a remote replicator. + :param received_and_not_applied_count: Represents the number of operations received and not yet + applied by a remote replicator. :type received_and_not_applied_count: str """ @@ -16200,7 +17858,10 @@ class RemoteReplicatorAcknowledgementDetail(Model): 'received_and_not_applied_count': {'key': 'ReceivedAndNotAppliedCount', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RemoteReplicatorAcknowledgementDetail, self).__init__(**kwargs) self.average_receive_duration = kwargs.get('average_receive_duration', None) self.average_apply_duration = kwargs.get('average_apply_duration', None) @@ -16208,17 +17869,15 @@ def __init__(self, **kwargs): self.received_and_not_applied_count = kwargs.get('received_and_not_applied_count', None) -class RemoteReplicatorAcknowledgementStatus(Model): - """Provides details about the remote replicators from the primary replicator's - point of view. +class RemoteReplicatorAcknowledgementStatus(msrest.serialization.Model): + """Provides details about the remote replicators from the primary replicator's point of view. 
- :param replication_stream_acknowledgement_detail: Details about the - acknowledgements for operations that are part of the replication stream - data. + :param replication_stream_acknowledgement_detail: Details about the acknowledgements for + operations that are part of the replication stream data. :type replication_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail - :param copy_stream_acknowledgement_detail: Details about the - acknowledgements for operations that are part of the copy stream data. + :param copy_stream_acknowledgement_detail: Details about the acknowledgements for operations + that are part of the copy stream data. :type copy_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail """ @@ -16228,46 +17887,45 @@ class RemoteReplicatorAcknowledgementStatus(Model): 'copy_stream_acknowledgement_detail': {'key': 'CopyStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RemoteReplicatorAcknowledgementStatus, self).__init__(**kwargs) self.replication_stream_acknowledgement_detail = kwargs.get('replication_stream_acknowledgement_detail', None) self.copy_stream_acknowledgement_detail = kwargs.get('copy_stream_acknowledgement_detail', None) -class RemoteReplicatorStatus(Model): - """Represents the state of the secondary replicator from the primary - replicator’s point of view. +class RemoteReplicatorStatus(msrest.serialization.Model): + """Represents the state of the secondary replicator from the primary replicator’s point of view. - :param replica_id: Represents the replica ID of the remote secondary - replicator. + :param replica_id: Represents the replica ID of the remote secondary replicator. 
:type replica_id: str - :param last_acknowledgement_processed_time_utc: The last timestamp (in - UTC) when an acknowledgement from the secondary replicator was processed - on the primary. - UTC 0 represents an invalid value, indicating that no acknowledgement - messages were ever processed. - :type last_acknowledgement_processed_time_utc: datetime - :param last_received_replication_sequence_number: The highest replication - operation sequence number that the secondary has received from the - primary. + :param last_acknowledgement_processed_time_utc: The last timestamp (in UTC) when an + acknowledgement from the secondary replicator was processed on the primary. + UTC 0 represents an invalid value, indicating that no acknowledgement messages were ever + processed. + :type last_acknowledgement_processed_time_utc: ~datetime.datetime + :param last_received_replication_sequence_number: The highest replication operation sequence + number that the secondary has received from the primary. :type last_received_replication_sequence_number: str - :param last_applied_replication_sequence_number: The highest replication - operation sequence number that the secondary has applied to its state. + :param last_applied_replication_sequence_number: The highest replication operation sequence + number that the secondary has applied to its state. :type last_applied_replication_sequence_number: str - :param is_in_build: A value that indicates whether the secondary replica - is in the process of being built. + :param is_in_build: A value that indicates whether the secondary replica is in the process of + being built. :type is_in_build: bool - :param last_received_copy_sequence_number: The highest copy operation - sequence number that the secondary has received from the primary. + :param last_received_copy_sequence_number: The highest copy operation sequence number that the + secondary has received from the primary. A value of -1 implies that the secondary has received all copy operations. 
:type last_received_copy_sequence_number: str - :param last_applied_copy_sequence_number: The highest copy operation - sequence number that the secondary has applied to its state. - A value of -1 implies that the secondary has applied all copy operations - and the copy process is complete. + :param last_applied_copy_sequence_number: The highest copy operation sequence number that the + secondary has applied to its state. + A value of -1 implies that the secondary has applied all copy operations and the copy process + is complete. :type last_applied_copy_sequence_number: str - :param remote_replicator_acknowledgement_status: Represents the - acknowledgment status for the remote secondary replicator. + :param remote_replicator_acknowledgement_status: Represents the acknowledgment status for the + remote secondary replicator. :type remote_replicator_acknowledgement_status: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementStatus """ @@ -16283,7 +17941,10 @@ class RemoteReplicatorStatus(Model): 'remote_replicator_acknowledgement_status': {'key': 'RemoteReplicatorAcknowledgementStatus', 'type': 'RemoteReplicatorAcknowledgementStatus'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RemoteReplicatorStatus, self).__init__(**kwargs) self.replica_id = kwargs.get('replica_id', None) self.last_acknowledgement_processed_time_utc = kwargs.get('last_acknowledgement_processed_time_utc', None) @@ -16295,95 +17956,87 @@ def __init__(self, **kwargs): self.remote_replicator_acknowledgement_status = kwargs.get('remote_replicator_acknowledgement_status', None) -class RepairTask(Model): - """Represents a repair task, which includes information about what kind of - repair was requested, what its progress is, and what its final result was. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. 
+class RepairTask(msrest.serialization.Model): + """Represents a repair task, which includes information about what kind of repair was requested, what its progress is, and what its final result was. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str :param version: The version of the repair task. - When creating a new repair task, the version must be set to zero. When - updating a repair task, + When creating a new repair task, the version must be set to zero. When updating a repair + task, the version is used for optimistic concurrency checks. If the version is - set to zero, the update will not check for write conflicts. If the - version is set to a non-zero value, then the - update will only succeed if the actual current version of the repair task - matches this value. + set to zero, the update will not check for write conflicts. If the version is set to a + non-zero value, then the + update will only succeed if the actual current version of the repair task matches this value. :type version: str - :param description: A description of the purpose of the repair task, or - other informational details. + :param description: A description of the purpose of the repair task, or other informational + details. May be set when the repair task is created, and is immutable once set. :type description: str - :param state: Required. The workflow state of the repair task. Valid - initial states are Created, Claimed, and Preparing. Possible values - include: 'Invalid', 'Created', 'Claimed', 'Preparing', 'Approved', - 'Executing', 'Restoring', 'Completed' + :param state: Required. The workflow state of the repair task. Valid initial states are + Created, Claimed, and Preparing. 
Possible values include: "Invalid", "Created", "Claimed", + "Preparing", "Approved", "Executing", "Restoring", "Completed". :type state: str or ~azure.servicefabric.models.State - :param flags: A bitwise-OR of the following values, which gives additional - details about the status of the repair task. - - 1 - Cancellation of the repair has been requested - - 2 - Abort of the repair has been requested - - 4 - Approval of the repair was forced via client request + :param flags: A bitwise-OR of the following values, which gives additional details about the + status of the repair task. + + + * 1 - Cancellation of the repair has been requested + * 2 - Abort of the repair has been requested + * 4 - Approval of the repair was forced via client request. :type flags: int - :param action: Required. The requested repair action. Must be specified - when the repair task is created, and is immutable once set. + :param action: Required. The requested repair action. Must be specified when the repair task is + created, and is immutable once set. :type action: str - :param target: The target object determines what actions the system will - take to prepare for the impact of the repair, prior to approving execution - of the repair. + :param target: The target object determines what actions the system will take to prepare for + the impact of the repair, prior to approving execution of the repair. May be set when the repair task is created, and is immutable once set. :type target: ~azure.servicefabric.models.RepairTargetDescriptionBase - :param executor: The name of the repair executor. Must be specified in - Claimed and later states, and is immutable once set. + :param executor: The name of the repair executor. Must be specified in Claimed and later + states, and is immutable once set. :type executor: str - :param executor_data: A data string that the repair executor can use to - store its internal state. 
+ :param executor_data: A data string that the repair executor can use to store its internal + state. :type executor_data: str - :param impact: The impact object determines what actions the system will - take to prepare for the impact of the repair, prior to approving execution - of the repair. - Impact must be specified by the repair executor when transitioning to the - Preparing state, and is immutable once set. + :param impact: The impact object determines what actions the system will take to prepare for + the impact of the repair, prior to approving execution of the repair. + Impact must be specified by the repair executor when transitioning to the Preparing state, and + is immutable once set. :type impact: ~azure.servicefabric.models.RepairImpactDescriptionBase - :param result_status: A value describing the overall result of the repair - task execution. Must be specified in the Restoring and later states, and - is immutable once set. Possible values include: 'Invalid', 'Succeeded', - 'Cancelled', 'Interrupted', 'Failed', 'Pending' + :param result_status: A value describing the overall result of the repair task execution. Must + be specified in the Restoring and later states, and is immutable once set. Possible values + include: "Invalid", "Succeeded", "Cancelled", "Interrupted", "Failed", "Pending". :type result_status: str or ~azure.servicefabric.models.ResultStatus - :param result_code: A numeric value providing additional details about the - result of the repair task execution. - May be specified in the Restoring and later states, and is immutable once - set. + :param result_code: A numeric value providing additional details about the result of the repair + task execution. + May be specified in the Restoring and later states, and is immutable once set. :type result_code: int - :param result_details: A string providing additional details about the - result of the repair task execution. 
- May be specified in the Restoring and later states, and is immutable once - set. + :param result_details: A string providing additional details about the result of the repair + task execution. + May be specified in the Restoring and later states, and is immutable once set. :type result_details: str - :param history: An object that contains timestamps of the repair task's - state transitions. - These timestamps are updated by the system, and cannot be directly - modified. + :param history: An object that contains timestamps of the repair task's state transitions. + These timestamps are updated by the system, and cannot be directly modified. :type history: ~azure.servicefabric.models.RepairTaskHistory - :param preparing_health_check_state: The workflow state of the health - check when the repair task is in the Preparing state. Possible values - include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' + :param preparing_health_check_state: The workflow state of the health check when the repair + task is in the Preparing state. Possible values include: "NotStarted", "InProgress", + "Succeeded", "Skipped", "TimedOut". :type preparing_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param restoring_health_check_state: The workflow state of the health - check when the repair task is in the Restoring state. Possible values - include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' + :param restoring_health_check_state: The workflow state of the health check when the repair + task is in the Restoring state. Possible values include: "NotStarted", "InProgress", + "Succeeded", "Skipped", "TimedOut". :type restoring_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param perform_preparing_health_check: A value to determine if health - checks will be performed when the repair task enters the Preparing state. 
+ :param perform_preparing_health_check: A value to determine if health checks will be performed + when the repair task enters the Preparing state. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A value to determine if health - checks will be performed when the repair task enters the Restoring state. + :param perform_restoring_health_check: A value to determine if health checks will be performed + when the repair task enters the Restoring state. :type perform_restoring_health_check: bool """ @@ -16414,14 +18067,17 @@ class RepairTask(Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTask, self).__init__(**kwargs) - self.task_id = kwargs.get('task_id', None) + self.task_id = kwargs['task_id'] self.version = kwargs.get('version', None) self.description = kwargs.get('description', None) - self.state = kwargs.get('state', None) + self.state = kwargs['state'] self.flags = kwargs.get('flags', None) - self.action = kwargs.get('action', None) + self.action = kwargs['action'] self.target = kwargs.get('target', None) self.executor = kwargs.get('executor', None) self.executor_data = kwargs.get('executor_data', None) @@ -16436,19 +18092,18 @@ def __init__(self, **kwargs): self.perform_restoring_health_check = kwargs.get('perform_restoring_health_check', None) -class RepairTaskApproveDescription(Model): +class RepairTaskApproveDescription(msrest.serialization.Model): """Describes a request for forced approval of a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. 
:type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version check - is performed. + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. :type version: str """ @@ -16461,29 +18116,31 @@ class RepairTaskApproveDescription(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTaskApproveDescription, self).__init__(**kwargs) - self.task_id = kwargs.get('task_id', None) + self.task_id = kwargs['task_id'] self.version = kwargs.get('version', None) -class RepairTaskCancelDescription(Model): +class RepairTaskCancelDescription(msrest.serialization.Model): """Describes a request to cancel a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version check - is performed. + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. :type version: str - :param request_abort: _True_ if the repair should be stopped as soon as - possible even if it has already started executing. 
_False_ if the repair - should be cancelled only if execution has not yet started. + :param request_abort: *True* if the repair should be stopped as soon as possible even if it has + already started executing. *False* if the repair should be cancelled only if execution has not + yet started. :type request_abort: bool """ @@ -16497,27 +18154,28 @@ class RepairTaskCancelDescription(Model): 'request_abort': {'key': 'RequestAbort', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTaskCancelDescription, self).__init__(**kwargs) - self.task_id = kwargs.get('task_id', None) + self.task_id = kwargs['task_id'] self.version = kwargs.get('version', None) self.request_abort = kwargs.get('request_abort', None) -class RepairTaskDeleteDescription(Model): +class RepairTaskDeleteDescription(msrest.serialization.Model): """Describes a request to delete a completed repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. - :param task_id: Required. The ID of the completed repair task to be - deleted. + :param task_id: Required. The ID of the completed repair task to be deleted. :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version check - is performed. + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. 
:type version: str """ @@ -16530,50 +18188,46 @@ class RepairTaskDeleteDescription(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTaskDeleteDescription, self).__init__(**kwargs) - self.task_id = kwargs.get('task_id', None) + self.task_id = kwargs['task_id'] self.version = kwargs.get('version', None) -class RepairTaskHistory(Model): +class RepairTaskHistory(msrest.serialization.Model): """A record of the times when the repair task entered each state. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. - - :param created_utc_timestamp: The time when the repair task entered the - Created state. - :type created_utc_timestamp: datetime - :param claimed_utc_timestamp: The time when the repair task entered the - Claimed state. - :type claimed_utc_timestamp: datetime - :param preparing_utc_timestamp: The time when the repair task entered the - Preparing state. - :type preparing_utc_timestamp: datetime - :param approved_utc_timestamp: The time when the repair task entered the - Approved state - :type approved_utc_timestamp: datetime - :param executing_utc_timestamp: The time when the repair task entered the - Executing state - :type executing_utc_timestamp: datetime - :param restoring_utc_timestamp: The time when the repair task entered the - Restoring state - :type restoring_utc_timestamp: datetime - :param completed_utc_timestamp: The time when the repair task entered the - Completed state - :type completed_utc_timestamp: datetime - :param preparing_health_check_start_utc_timestamp: The time when the - repair task started the health check in the Preparing state. - :type preparing_health_check_start_utc_timestamp: datetime - :param preparing_health_check_end_utc_timestamp: The time when the repair - task completed the health check in the Preparing state. 
- :type preparing_health_check_end_utc_timestamp: datetime - :param restoring_health_check_start_utc_timestamp: The time when the - repair task started the health check in the Restoring state. - :type restoring_health_check_start_utc_timestamp: datetime - :param restoring_health_check_end_utc_timestamp: The time when the repair - task completed the health check in the Restoring state. - :type restoring_health_check_end_utc_timestamp: datetime + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. + + :param created_utc_timestamp: The time when the repair task entered the Created state. + :type created_utc_timestamp: ~datetime.datetime + :param claimed_utc_timestamp: The time when the repair task entered the Claimed state. + :type claimed_utc_timestamp: ~datetime.datetime + :param preparing_utc_timestamp: The time when the repair task entered the Preparing state. + :type preparing_utc_timestamp: ~datetime.datetime + :param approved_utc_timestamp: The time when the repair task entered the Approved state. + :type approved_utc_timestamp: ~datetime.datetime + :param executing_utc_timestamp: The time when the repair task entered the Executing state. + :type executing_utc_timestamp: ~datetime.datetime + :param restoring_utc_timestamp: The time when the repair task entered the Restoring state. + :type restoring_utc_timestamp: ~datetime.datetime + :param completed_utc_timestamp: The time when the repair task entered the Completed state. + :type completed_utc_timestamp: ~datetime.datetime + :param preparing_health_check_start_utc_timestamp: The time when the repair task started the + health check in the Preparing state. + :type preparing_health_check_start_utc_timestamp: ~datetime.datetime + :param preparing_health_check_end_utc_timestamp: The time when the repair task completed the + health check in the Preparing state. 
+ :type preparing_health_check_end_utc_timestamp: ~datetime.datetime + :param restoring_health_check_start_utc_timestamp: The time when the repair task started the + health check in the Restoring state. + :type restoring_health_check_start_utc_timestamp: ~datetime.datetime + :param restoring_health_check_end_utc_timestamp: The time when the repair task completed the + health check in the Restoring state. + :type restoring_health_check_end_utc_timestamp: ~datetime.datetime """ _attribute_map = { @@ -16590,7 +18244,10 @@ class RepairTaskHistory(Model): 'restoring_health_check_end_utc_timestamp': {'key': 'RestoringHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTaskHistory, self).__init__(**kwargs) self.created_utc_timestamp = kwargs.get('created_utc_timestamp', None) self.claimed_utc_timestamp = kwargs.get('claimed_utc_timestamp', None) @@ -16605,29 +18262,26 @@ def __init__(self, **kwargs): self.restoring_health_check_end_utc_timestamp = kwargs.get('restoring_health_check_end_utc_timestamp', None) -class RepairTaskUpdateHealthPolicyDescription(Model): +class RepairTaskUpdateHealthPolicyDescription(msrest.serialization.Model): """Describes a request to update the health policy of a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task to be updated. :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current value of the repair task. If zero, then no version check is - performed. + :param version: The current version number of the repair task. 
If non-zero, then the request + will only succeed if this value matches the actual current value of the repair task. If zero, + then no version check is performed. :type version: str - :param perform_preparing_health_check: A boolean indicating if health - check is to be performed in the Preparing stage of the repair task. If not - specified the existing value should not be altered. Otherwise, specify the - desired new value. + :param perform_preparing_health_check: A boolean indicating if health check is to be performed + in the Preparing stage of the repair task. If not specified the existing value should not be + altered. Otherwise, specify the desired new value. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A boolean indicating if health - check is to be performed in the Restoring stage of the repair task. If not - specified the existing value should not be altered. Otherwise, specify the - desired new value. + :param perform_restoring_health_check: A boolean indicating if health check is to be performed + in the Restoring stage of the repair task. If not specified the existing value should not be + altered. Otherwise, specify the desired new value. 
:type perform_restoring_health_check: bool """ @@ -16642,18 +18296,21 @@ class RepairTaskUpdateHealthPolicyDescription(Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTaskUpdateHealthPolicyDescription, self).__init__(**kwargs) - self.task_id = kwargs.get('task_id', None) + self.task_id = kwargs['task_id'] self.version = kwargs.get('version', None) self.perform_preparing_health_check = kwargs.get('perform_preparing_health_check', None) self.perform_restoring_health_check = kwargs.get('perform_restoring_health_check', None) -class RepairTaskUpdateInfo(Model): +class RepairTaskUpdateInfo(msrest.serialization.Model): """Describes the result of an operation that created or updated a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. @@ -16669,44 +18326,43 @@ class RepairTaskUpdateInfo(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RepairTaskUpdateInfo, self).__init__(**kwargs) - self.version = kwargs.get('version', None) + self.version = kwargs['version'] class ReplicaHealth(EntityHealth): - """Represents a base class for stateful service replica or stateless service - instance health. - Contains the replica aggregated health state, the health events and the - unhealthy evaluations. + """Represents a base class for stateful service replica or stateless service instance health. +Contains the replica aggregated health state, the health events and the unhealthy evaluations. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: StatefulServiceReplicaHealth, - StatelessServiceInstanceHealth + sub-classes are: StatefulServiceReplicaHealth, StatelessServiceInstanceHealth. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. 
:type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str """ _validation = { @@ -16718,58 +18374,60 @@ class ReplicaHealth(EntityHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealth', 'Stateless': 'StatelessServiceInstanceHealth'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaHealth, self).__init__(**kwargs) + self.service_kind = 'ReplicaHealth' # type: str self.partition_id = kwargs.get('partition_id', None) - self.service_kind = None - self.service_kind = 'ReplicaHealth' class ReplicaHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a replica, containing information about - the data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a replica, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param partition_id: Id of the partition to which the replica belongs. 
:type partition_id: str - :param replica_or_instance_id: Id of a stateful service replica or a - stateless service instance. This ID is used in the queries that apply to - both stateful and stateless services. It is used by Service Fabric to - uniquely identify a replica of a partition of a stateful service or an - instance of a stateless service partition. It is unique within a partition - and does not change for the lifetime of the replica or the instance. If a - stateful replica gets dropped and another replica gets created on the same - node for the same partition, it will get a different value for the ID. If - a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a stateless service + instance. This ID is used in the queries that apply to both stateful and stateless services. It + is used by Service Fabric to uniquely identify a replica of a partition of a stateful service + or an instance of a stateless service partition. It is unique within a partition and does not + change for the lifetime of the replica or the instance. If a stateful replica gets dropped and + another replica gets created on the same node for the same partition, it will get a different + value for the ID. If a stateless instance is failed over on the same or different node it will get a different value for the ID. :type replica_or_instance_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the replica. The types of the - unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the replica. The types of the unhealthy evaluations can be + EventHealthEvaluation. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -16777,43 +18435,42 @@ class ReplicaHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Replica' # type: str self.partition_id = kwargs.get('partition_id', None) self.replica_or_instance_id = kwargs.get('replica_or_instance_id', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Replica' class ReplicaHealthState(EntityHealthState): - """Represents a base class for stateful service replica or stateless service - instance health state. + """Represents a base class for stateful service replica or stateless service instance health state. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaHealthState, - StatelessServiceInstanceHealthState + sub-classes are: StatefulServiceReplicaHealthState, StatelessServiceInstanceHealthState. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: The ID of the partition to which this replica - belongs. 
+ :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str """ _validation = { @@ -16822,40 +18479,38 @@ class ReplicaHealthState(EntityHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaHealthState, self).__init__(**kwargs) + self.service_kind = 'ReplicaHealthState' # type: str self.partition_id = kwargs.get('partition_id', None) - self.service_kind = None - self.service_kind = 'ReplicaHealthState' class ReplicaHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a stateful service replica or a - stateless service instance. - The replica health state contains the replica ID and its aggregated health - state. - - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + """Represents the health state chunk of a stateful service replica or a stateless service instance. +The replica health state contains the replica ID and its aggregated health state. + + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param replica_or_instance_id: Id of a stateful service replica or a - stateless service instance. This ID is used in the queries that apply to - both stateful and stateless services. It is used by Service Fabric to - uniquely identify a replica of a partition of a stateful service or an - instance of a stateless service partition. It is unique within a partition - and does not change for the lifetime of the replica or the instance. If a - stateful replica gets dropped and another replica gets created on the same - node for the same partition, it will get a different value for the ID. If - a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a stateless service + instance. This ID is used in the queries that apply to both stateful and stateless services. It + is used by Service Fabric to uniquely identify a replica of a partition of a stateful service + or an instance of a stateless service partition. It is unique within a partition and does not + change for the lifetime of the replica or the instance. If a stateful replica gets dropped and + another replica gets created on the same node for the same partition, it will get a different + value for the ID. If a stateless instance is failed over on the same or different node it will get a different value for the ID. 
:type replica_or_instance_id: str """ @@ -16865,17 +18520,19 @@ class ReplicaHealthStateChunk(EntityHealthStateChunk): 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaHealthStateChunk, self).__init__(**kwargs) self.replica_or_instance_id = kwargs.get('replica_or_instance_id', None) -class ReplicaHealthStateChunkList(Model): - """The list of replica health state chunks that respect the input filters in - the chunk query. Returned by get cluster health state chunks query. +class ReplicaHealthStateChunkList(msrest.serialization.Model): + """The list of replica health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param items: The list of replica health state chunks that respect the - input filters in the chunk query. + :param items: The list of replica health state chunks that respect the input filters in the + chunk query. :type items: list[~azure.servicefabric.models.ReplicaHealthStateChunk] """ @@ -16883,56 +18540,49 @@ class ReplicaHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[ReplicaHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class ReplicaHealthStateFilter(Model): - """Defines matching criteria to determine whether a replica should be included - as a child of a partition in the cluster health chunk. - The replicas are only returned if the parent entities match a filter - specified in the cluster health chunk query description. The parent - partition, service and application must be included in the cluster health - chunk. - One filter can match zero, one or multiple replicas, depending on its - properties. 
- - :param replica_or_instance_id_filter: Id of the stateful service replica - or stateless service instance that matches the filter. The filter is - applied only to the specified replica, if it exists. - If the replica doesn't exist, no replica is returned in the cluster health - chunk based on this filter. - If the replica exists, it is included in the cluster health chunk if it - respects the other filter properties. - If not specified, all replicas that match the parent filters (if any) are - taken into consideration and matched against the other filter members, - like health state filter. +class ReplicaHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a replica should be included as a child of a partition in the cluster health chunk. +The replicas are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent partition, service and application must be included in the cluster health chunk. +One filter can match zero, one or multiple replicas, depending on its properties. + + :param replica_or_instance_id_filter: Id of the stateful service replica or stateless service + instance that matches the filter. The filter is applied only to the specified replica, if it + exists. + If the replica doesn't exist, no replica is returned in the cluster health chunk based on this + filter. + If the replica exists, it is included in the cluster health chunk if it respects the other + filter properties. + If not specified, all replicas that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type replica_or_instance_id_filter: str - :param health_state_filter: The filter for the health state of the - replicas. It allows selecting replicas if they match the desired health - states. - The possible values are integer value of one of the following health - states. 
Only replicas that match the filter are returned. All replicas are - used to evaluate the parent partition aggregated health state. - If not specified, default value is None, unless the replica ID is - specified. If the filter has default value and replica ID is specified, - the matching replica is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches replicas with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the replicas. It allows + selecting replicas if they match the desired health states. + The possible values are integer value of one of the following health states. Only replicas + that match the filter are returned. All replicas are used to evaluate the parent partition + aggregated health state. + If not specified, default value is None, unless the replica ID is specified. If the filter has + default value and replica ID is specified, the matching replica is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches replicas with HealthState value of OK (2) + and Warning (4). + + + * Default - Default value. Matches any HealthState. 
The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int """ @@ -16941,38 +18591,39 @@ class ReplicaHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaHealthStateFilter, self).__init__(**kwargs) self.replica_or_instance_id_filter = kwargs.get('replica_or_instance_id_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) -class ReplicaInfo(Model): - """Information about the identity, status, health, node name, uptime, and - other details about the replica. +class ReplicaInfo(msrest.serialization.Model): + """Information about the identity, status, health, node name, uptime, and other details about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo + sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo. All required parameters must be populated in order to send to Azure. - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param replica_status: The status of a replica of a service. 
Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of - the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str """ _validation = { @@ -16980,38 +18631,64 @@ class ReplicaInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.replica_status = kwargs.get('replica_status', None) self.health_state = kwargs.get('health_state', None) self.node_name = kwargs.get('node_name', None) self.address = kwargs.get('address', None) self.last_in_build_duration_in_seconds = kwargs.get('last_in_build_duration_in_seconds', None) - self.service_kind = None -class ReplicaMetricLoadDescription(Model): - """Specifies metric loads of a partition's specific secondary replica or - instance. +class ReplicaLifecycleDescription(msrest.serialization.Model): + """Describes how the replica will behave. + + :param is_singleton_replica_move_allowed_during_upgrade: If set to true, replicas with a target + replica set size of 1 will be permitted to move during upgrade. + :type is_singleton_replica_move_allowed_during_upgrade: bool + :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original + location after upgrade. 
+ :type restore_replica_location_after_upgrade: bool + """ + + _attribute_map = { + 'is_singleton_replica_move_allowed_during_upgrade': {'key': 'IsSingletonReplicaMoveAllowedDuringUpgrade', 'type': 'bool'}, + 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(ReplicaLifecycleDescription, self).__init__(**kwargs) + self.is_singleton_replica_move_allowed_during_upgrade = kwargs.get('is_singleton_replica_move_allowed_during_upgrade', None) + self.restore_replica_location_after_upgrade = kwargs.get('restore_replica_location_after_upgrade', None) + + +class ReplicaMetricLoadDescription(msrest.serialization.Model): + """Specifies metric loads of a partition's specific secondary replica or instance. :param node_name: Node name of a specific secondary replica or instance. :type node_name: str - :param replica_or_instance_load_entries: Loads of a different metrics for - a partition's secondary replica or instance. - :type replica_or_instance_load_entries: - list[~azure.servicefabric.models.MetricLoadDescription] + :param replica_or_instance_load_entries: Loads of a different metrics for a partition's + secondary replica or instance. 
+ :type replica_or_instance_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] """ _attribute_map = { @@ -17019,43 +18696,45 @@ class ReplicaMetricLoadDescription(Model): 'replica_or_instance_load_entries': {'key': 'ReplicaOrInstanceLoadEntries', 'type': '[MetricLoadDescription]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicaMetricLoadDescription, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.replica_or_instance_load_entries = kwargs.get('replica_or_instance_load_entries', None) class ReplicasHealthEvaluation(HealthEvaluation): - """Represents health evaluation for replicas, containing health evaluations - for each unhealthy replica that impacted current aggregated health state. - Can be returned when evaluating partition health and the aggregated health - state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for replicas, containing health evaluations for each unhealthy replica that impacted current aggregated health state. Can be returned when evaluating partition health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. 
Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_replicas_per_partition: Maximum allowed - percentage of unhealthy replicas per partition from the - ApplicationHealthPolicy. + :param max_percent_unhealthy_replicas_per_partition: Maximum allowed percentage of unhealthy + replicas per partition from the ApplicationHealthPolicy. :type max_percent_unhealthy_replicas_per_partition: int - :param total_count: Total number of replicas in the partition from the - health store. + :param total_count: Total number of replicas in the partition from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ReplicaHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy ReplicaHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -17063,58 +18742,54 @@ class ReplicasHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicasHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Replicas' # type: str self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Replicas' -class ReplicatorQueueStatus(Model): - """Provides various statistics of the queue used in the service fabric - replicator. - Contains information about the service fabric replicator like the - replication/copy queue utilization, last acknowledgement received - timestamp, etc. - Depending on the role of the replicator, the properties in this type imply - different meanings. +class ReplicatorQueueStatus(msrest.serialization.Model): + """Provides various statistics of the queue used in the service fabric replicator. 
+Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. +Depending on the role of the replicator, the properties in this type imply different meanings. - :param queue_utilization_percentage: Represents the utilization of the - queue. A value of 0 indicates that the queue is empty and a value of 100 - indicates the queue is full. + :param queue_utilization_percentage: Represents the utilization of the queue. A value of 0 + indicates that the queue is empty and a value of 100 indicates the queue is full. :type queue_utilization_percentage: int - :param queue_memory_size: Represents the virtual memory consumed by the - queue in bytes. + :param queue_memory_size: Represents the virtual memory consumed by the queue in bytes. :type queue_memory_size: str - :param first_sequence_number: On a primary replicator, this is - semantically the sequence number of the operation for which all the - secondary replicas have sent an acknowledgement. - On a secondary replicator, this is the smallest sequence number of the - operation that is present in the queue. + :param first_sequence_number: On a primary replicator, this is semantically the sequence number + of the operation for which all the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is the smallest sequence number of the operation that is + present in the queue. :type first_sequence_number: str - :param completed_sequence_number: On a primary replicator, this is - semantically the highest sequence number of the operation for which all - the secondary replicas have sent an acknowledgement. - On a secondary replicator, this is semantically the highest sequence - number that has been applied to the persistent state. 
+ :param completed_sequence_number: On a primary replicator, this is semantically the highest + sequence number of the operation for which all the secondary replicas have sent an + acknowledgement. + On a secondary replicator, this is semantically the highest sequence number that has been + applied to the persistent state. :type completed_sequence_number: str - :param committed_sequence_number: On a primary replicator, this is - semantically the highest sequence number of the operation for which a - write quorum of the secondary replicas have sent an acknowledgement. - On a secondary replicator, this is semantically the highest sequence - number of the in-order operation received from the primary. + :param committed_sequence_number: On a primary replicator, this is semantically the highest + sequence number of the operation for which a write quorum of the secondary replicas have sent + an acknowledgement. + On a secondary replicator, this is semantically the highest sequence number of the in-order + operation received from the primary. :type committed_sequence_number: str - :param last_sequence_number: Represents the latest sequence number of the - operation that is available in the queue. + :param last_sequence_number: Represents the latest sequence number of the operation that is + available in the queue. 
:type last_sequence_number: str """ @@ -17127,7 +18802,10 @@ class ReplicatorQueueStatus(Model): 'last_sequence_number': {'key': 'LastSequenceNumber', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ReplicatorQueueStatus, self).__init__(**kwargs) self.queue_utilization_percentage = kwargs.get('queue_utilization_percentage', None) self.queue_memory_size = kwargs.get('queue_memory_size', None) @@ -17137,16 +18815,14 @@ def __init__(self, **kwargs): self.last_sequence_number = kwargs.get('last_sequence_number', None) -class ResolvedServiceEndpoint(Model): +class ResolvedServiceEndpoint(msrest.serialization.Model): """Endpoint of a resolved service partition. - :param kind: The role of the replica where the endpoint is reported. - Possible values include: 'Invalid', 'Stateless', 'StatefulPrimary', - 'StatefulSecondary' + :param kind: The role of the replica where the endpoint is reported. Possible values include: + "Invalid", "Stateless", "StatefulPrimary", "StatefulSecondary". :type kind: str or ~azure.servicefabric.models.ServiceEndpointRole - :param address: The address of the endpoint. If the endpoint has multiple - listeners the address is a JSON object with one property per listener with - the value as the address of that listener. + :param address: The address of the endpoint. If the endpoint has multiple listeners the address + is a JSON object with one property per listener with the value as the address of that listener. 
:type address: str """ @@ -17155,30 +18831,29 @@ class ResolvedServiceEndpoint(Model): 'address': {'key': 'Address', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResolvedServiceEndpoint, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.address = kwargs.get('address', None) -class ResolvedServicePartition(Model): +class ResolvedServicePartition(msrest.serialization.Model): """Information about a service partition and its associated endpoints. All required parameters must be populated in order to send to Azure. - :param name: Required. The full name of the service with 'fabric:' URI - scheme. + :param name: Required. The full name of the service with 'fabric:' URI scheme. :type name: str - :param partition_information: Required. A representation of the resolved - partition. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param endpoints: Required. List of resolved service endpoints of a - service partition. + :param partition_information: Required. A representation of the resolved partition. + :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param endpoints: Required. List of resolved service endpoints of a service partition. :type endpoints: list[~azure.servicefabric.models.ResolvedServiceEndpoint] - :param version: Required. The version of this resolved service partition - result. This version should be passed in the next time the ResolveService - call is made via the PreviousRspVersion query parameter. + :param version: Required. The version of this resolved service partition result. This version + should be passed in the next time the ResolveService call is made via the PreviousRspVersion + query parameter. 
:type version: str """ @@ -17196,23 +18871,23 @@ class ResolvedServicePartition(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResolvedServicePartition, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.partition_information = kwargs.get('partition_information', None) - self.endpoints = kwargs.get('endpoints', None) - self.version = kwargs.get('version', None) + self.name = kwargs['name'] + self.partition_information = kwargs['partition_information'] + self.endpoints = kwargs['endpoints'] + self.version = kwargs['version'] -class ResourceLimits(Model): - """This type describes the resource limits for a given container. It describes - the most amount of resources a container is allowed to use before being - restarted. +class ResourceLimits(msrest.serialization.Model): + """This type describes the resource limits for a given container. It describes the most amount of resources a container is allowed to use before being restarted. :param memory_in_gb: The memory limit in GB. :type memory_in_gb: float - :param cpu: CPU limits in cores. At present, only full cores are - supported. + :param cpu: CPU limits in cores. At present, only full cores are supported. :type cpu: float """ @@ -17221,26 +18896,23 @@ class ResourceLimits(Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResourceLimits, self).__init__(**kwargs) self.memory_in_gb = kwargs.get('memory_in_gb', None) self.cpu = kwargs.get('cpu', None) -class ResourceRequests(Model): - """This type describes the requested resources for a given container. It - describes the least amount of resources required for the container. A - container can consume more than requested resources up to the specified - limits before being restarted. Currently, the requested resources are - treated as limits. 
+class ResourceRequests(msrest.serialization.Model): + """This type describes the requested resources for a given container. It describes the least amount of resources required for the container. A container can consume more than requested resources up to the specified limits before being restarted. Currently, the requested resources are treated as limits. All required parameters must be populated in order to send to Azure. - :param memory_in_gb: Required. The memory request in GB for this - container. + :param memory_in_gb: Required. The memory request in GB for this container. :type memory_in_gb: float - :param cpu: Required. Requested number of CPU cores. At present, only full - cores are supported. + :param cpu: Required. Requested number of CPU cores. At present, only full cores are supported. :type cpu: float """ @@ -17254,22 +18926,23 @@ class ResourceRequests(Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResourceRequests, self).__init__(**kwargs) - self.memory_in_gb = kwargs.get('memory_in_gb', None) - self.cpu = kwargs.get('cpu', None) + self.memory_in_gb = kwargs['memory_in_gb'] + self.cpu = kwargs['cpu'] -class ResourceRequirements(Model): +class ResourceRequirements(msrest.serialization.Model): """This type describes the resource requirements for a container or a service. All required parameters must be populated in order to send to Azure. - :param requests: Required. Describes the requested resources for a given - container. + :param requests: Required. Describes the requested resources for a given container. :type requests: ~azure.servicefabric.models.ResourceRequests - :param limits: Describes the maximum limits on the resources for a given - container. + :param limits: Describes the maximum limits on the resources for a given container. 
:type limits: ~azure.servicefabric.models.ResourceLimits """ @@ -17282,41 +18955,42 @@ class ResourceRequirements(Model): 'limits': {'key': 'limits', 'type': 'ResourceLimits'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResourceRequirements, self).__init__(**kwargs) - self.requests = kwargs.get('requests', None) + self.requests = kwargs['requests'] self.limits = kwargs.get('limits', None) -class RestartDeployedCodePackageDescription(Model): - """Defines description for restarting a deployed code package on Service - Fabric node. +class RestartDeployedCodePackageDescription(msrest.serialization.Model): + """Defines description for restarting a deployed code package on Service Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest that - specified this code package. + :param service_manifest_name: Required. The name of service manifest that specified this code + package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param code_package_name: Required. The name of the code package defined - in the service manifest. + :param code_package_name: Required. The name of the code package defined in the service + manifest. 
:type code_package_name: str - :param code_package_instance_id: Required. The instance ID for currently - running entry point. For a code package setup entry point (if specified) - runs first and after it finishes main entry point is started. - Each time entry point executable is run, its instance ID will change. If 0 - is passed in as the code package instance ID, the API will restart the - code package with whatever instance ID it is currently running. - If an instance ID other than 0 is passed in, the API will restart the code - package only if the current Instance ID matches the passed in instance ID. - Note, passing in the exact instance ID (not 0) in the API is safer, - because if ensures at most one restart of the code package. + :param code_package_instance_id: Required. The instance ID for currently running entry point. + For a code package setup entry point (if specified) runs first and after it finishes main entry + point is started. + Each time entry point executable is run, its instance ID will change. If 0 is passed in as the + code package instance ID, the API will restart the code package with whatever instance ID it is + currently running. + If an instance ID other than 0 is passed in, the API will restart the code package only if the + current Instance ID matches the passed in instance ID. + Note, passing in the exact instance ID (not 0) in the API is safer, because if ensures at most + one restart of the code package. 
:type code_package_instance_id: str """ @@ -17333,30 +19007,30 @@ class RestartDeployedCodePackageDescription(Model): 'code_package_instance_id': {'key': 'CodePackageInstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RestartDeployedCodePackageDescription, self).__init__(**kwargs) - self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_manifest_name = kwargs['service_manifest_name'] self.service_package_activation_id = kwargs.get('service_package_activation_id', None) - self.code_package_name = kwargs.get('code_package_name', None) - self.code_package_instance_id = kwargs.get('code_package_instance_id', None) + self.code_package_name = kwargs['code_package_name'] + self.code_package_instance_id = kwargs['code_package_instance_id'] -class RestartNodeDescription(Model): +class RestartNodeDescription(msrest.serialization.Model): """Describes the parameters to restart a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param node_instance_id: Required. The instance ID of the target node. If - instance ID is specified the node is restarted only if it matches with the - current instance of the node. A default value of "0" would match any - instance ID. The instance ID can be obtained using get node query. Default - value: "0" . + :param node_instance_id: Required. The instance ID of the target node. If instance ID is + specified the node is restarted only if it matches with the current instance of the node. A + default value of "0" would match any instance ID. The instance ID can be obtained using get + node query. :type node_instance_id: str - :param create_fabric_dump: Specify True to create a dump of the fabric - node process. This is case-sensitive. Possible values include: 'False', - 'True'. Default value: "False" . 
- :type create_fabric_dump: str or - ~azure.servicefabric.models.CreateFabricDump + :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is + case-sensitive. Possible values include: "False", "True". Default value: "False". + :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump """ _validation = { @@ -17368,21 +19042,23 @@ class RestartNodeDescription(Model): 'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RestartNodeDescription, self).__init__(**kwargs) self.node_instance_id = kwargs.get('node_instance_id', "0") self.create_fabric_dump = kwargs.get('create_fabric_dump', "False") -class RestartPartitionResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class RestartPartitionResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the - partition that the user-induced operation acted upon. + :param selected_partition: This class returns information about the partition that the + user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -17391,25 +19067,26 @@ class RestartPartitionResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RestartPartitionResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.selected_partition = kwargs.get('selected_partition', None) -class RestorePartitionDescription(Model): - """Specifies the parameters needed to trigger a restore of a specific - partition. +class RestorePartitionDescription(msrest.serialization.Model): + """Specifies the parameters needed to trigger a restore of a specific partition. All required parameters must be populated in order to send to Azure. :param backup_id: Required. Unique backup ID. :type backup_id: str - :param backup_location: Required. Location of the backup relative to the - backup storage specified/ configured. + :param backup_location: Required. Location of the backup relative to the backup storage + specified/ configured. :type backup_location: str - :param backup_storage: Location of the backup from where the partition - will be restored. + :param backup_storage: Location of the backup from where the partition will be restored. 
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -17424,29 +19101,29 @@ class RestorePartitionDescription(Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RestorePartitionDescription, self).__init__(**kwargs) - self.backup_id = kwargs.get('backup_id', None) - self.backup_location = kwargs.get('backup_location', None) + self.backup_id = kwargs['backup_id'] + self.backup_location = kwargs['backup_location'] self.backup_storage = kwargs.get('backup_storage', None) -class RestoreProgressInfo(Model): +class RestoreProgressInfo(msrest.serialization.Model): """Describes the progress of a restore operation on a partition. - :param restore_state: Represents the current state of the partition - restore operation. Possible values include: 'Invalid', 'Accepted', - 'RestoreInProgress', 'Success', 'Failure', 'Timeout' + :param restore_state: Represents the current state of the partition restore operation. Possible + values include: "Invalid", "Accepted", "RestoreInProgress", "Success", "Failure", "Timeout". :type restore_state: str or ~azure.servicefabric.models.RestoreState :param time_stamp_utc: Timestamp when operation succeeded or failed. - :type time_stamp_utc: datetime - :param restored_epoch: Describes the epoch at which the partition is - restored. + :type time_stamp_utc: ~datetime.datetime + :param restored_epoch: Describes the epoch at which the partition is restored. :type restored_epoch: ~azure.servicefabric.models.Epoch :param restored_lsn: Restored LSN. :type restored_lsn: str - :param failure_error: Denotes the failure encountered in performing - restore operation. + :param failure_error: Denotes the failure encountered in performing restore operation. 
:type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -17458,7 +19135,10 @@ class RestoreProgressInfo(Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RestoreProgressInfo, self).__init__(**kwargs) self.restore_state = kwargs.get('restore_state', None) self.time_stamp_utc = kwargs.get('time_stamp_utc', None) @@ -17467,14 +19147,13 @@ def __init__(self, **kwargs): self.failure_error = kwargs.get('failure_error', None) -class ResumeApplicationUpgradeDescription(Model): - """Describes the parameters for resuming an unmonitored manual Service Fabric - application upgrade. +class ResumeApplicationUpgradeDescription(msrest.serialization.Model): + """Describes the parameters for resuming an unmonitored manual Service Fabric application upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain_name: Required. The name of the upgrade domain in - which to resume the upgrade. + :param upgrade_domain_name: Required. The name of the upgrade domain in which to resume the + upgrade. :type upgrade_domain_name: str """ @@ -17486,18 +19165,20 @@ class ResumeApplicationUpgradeDescription(Model): 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResumeApplicationUpgradeDescription, self).__init__(**kwargs) - self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) + self.upgrade_domain_name = kwargs['upgrade_domain_name'] -class ResumeClusterUpgradeDescription(Model): +class ResumeClusterUpgradeDescription(msrest.serialization.Model): """Describes the parameters for resuming a cluster upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain: Required. The next upgrade domain for this cluster - upgrade. + :param upgrade_domain: Required. 
The next upgrade domain for this cluster upgrade. :type upgrade_domain: str """ @@ -17509,83 +19190,74 @@ class ResumeClusterUpgradeDescription(Model): 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ResumeClusterUpgradeDescription, self).__init__(**kwargs) - self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.upgrade_domain = kwargs['upgrade_domain'] -class RollingUpgradeUpdateDescription(Model): - """Describes the parameters for updating a rolling upgrade of application or - cluster. +class RollingUpgradeUpdateDescription(msrest.serialization.Model): + """Describes the parameters for updating a rolling upgrade of application or cluster. All required parameters must be populated in order to send to Azure. - :param rolling_upgrade_mode: Required. The mode used to monitor health - during a rolling upgrade. The values are UnmonitoredAuto, - UnmonitoredManual, and Monitored. Possible values include: 'Invalid', - 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: - "UnmonitoredAuto" . + :param rolling_upgrade_mode: Required. The mode used to monitor health during a rolling + upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values + include: "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). 
:type force_restart: bool - :param replica_set_check_timeout_in_milliseconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param replica_set_check_timeout_in_milliseconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type replica_set_check_timeout_in_milliseconds: long - :param failure_action: The compensating action to perform when a Monitored - upgrade encounters monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that - the upgrade will start rolling back automatically. - Manual indicates that the upgrade will switch to UnmonitoredManual upgrade - mode. Possible values include: 'Invalid', 'Rollback', 'Manual' + :param failure_action: The compensating action to perform when a Monitored upgrade encounters + monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will + start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible + values include: "Invalid", "Rollback", "Manual". 
:type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to - wait after completing an upgrade domain before applying health policies. - It is first interpreted as a string representing an ISO 8601 duration. If - that fails, then it is interpreted as a number representing the total - number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing + an upgrade domain before applying health policies. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time - that the application or cluster must remain healthy before the upgrade - proceeds to the next upgrade domain. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time that the application or + cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to - retry health evaluation when the application or cluster is unhealthy - before FailureAction is executed. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. 
+ :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health + evaluation when the application or cluster is unhealthy before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall - upgrade has to complete before FailureAction is executed. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, - then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete + before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 + duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each - upgrade domain has to complete before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that - fails, then it is interpreted as a number representing the total number of - milliseconds. + :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to + complete before FailureAction is executed. It is first interpreted as a string representing an + ISO 8601 duration. If that fails, then it is interpreted as a number representing the total + number of milliseconds. :type upgrade_domain_timeout_in_milliseconds: str - :param instance_close_delay_duration_in_seconds: Duration in seconds, to - wait before a stateless instance is closed, to allow the active requests - to drain gracefully. 
This would be effective when the instance is closing - during the application/cluster - upgrade, only for those instances which have a non-zero delay duration - configured in the service description. See - InstanceCloseDelayDurationSeconds property in $ref: + :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a + stateless instance is closed, to allow the active requests to drain gracefully. This would be + effective when the instance is closing during the application/cluster + upgrade, only for those instances which have a non-zero delay duration configured in the + service description. See InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is - 4294967295, which indicates that the behavior will entirely depend on the - delay configured in the stateless service description. + Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates + that the behavior will entirely depend on the delay configured in the stateless service + description. 
:type instance_close_delay_duration_in_seconds: long """ @@ -17606,34 +19278,33 @@ class RollingUpgradeUpdateDescription(Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RollingUpgradeUpdateDescription, self).__init__(**kwargs) self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.force_restart = kwargs.get('force_restart', None) - self.replica_set_check_timeout_in_milliseconds = kwargs.get('replica_set_check_timeout_in_milliseconds', None) + self.force_restart = kwargs.get('force_restart', False) + self.replica_set_check_timeout_in_milliseconds = kwargs.get('replica_set_check_timeout_in_milliseconds', 42949672925) self.failure_action = kwargs.get('failure_action', None) - self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', None) - self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', None) - self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', None) - self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', None) - self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', None) - self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', None) + self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', "0") + self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', "PT0H2M0S") + self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', "PT0H10M0S") + self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', 
"P10675199DT02H48M05.4775807S") + self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") + self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', 4294967295) class RunToCompletionExecutionPolicy(ExecutionPolicy): - """The run to completion execution policy, the service will perform its - desired operation and complete successfully. If the service encounters - failure, it will restarted based on restart policy specified. If the - service completes its operation successfully, it will not be restarted - again. + """The run to completion execution policy, the service will perform its desired operation and complete successfully. If the service encounters failure, it will restarted based on restart policy specified. If the service completes its operation successfully, it will not be restarted again. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param restart: Required. Enumerates the restart policy for - RunToCompletionExecutionPolicy. Possible values include: 'OnFailure', - 'Never' + :param type: Required. Enumerates the execution policy types for services.Constant filled by + server. Possible values include: "Default", "RunToCompletion". + :type type: str or ~azure.servicefabric.models.ExecutionPolicyType + :param restart: Required. Enumerates the restart policy for RunToCompletionExecutionPolicy. + Possible values include: "OnFailure", "Never". 
:type restart: str or ~azure.servicefabric.models.RestartPolicy """ @@ -17647,20 +19318,21 @@ class RunToCompletionExecutionPolicy(ExecutionPolicy): 'restart': {'key': 'restart', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RunToCompletionExecutionPolicy, self).__init__(**kwargs) - self.restart = kwargs.get('restart', None) - self.type = 'RunToCompletion' + self.type = 'RunToCompletion' # type: str + self.restart = kwargs['restart'] -class SafetyCheckWrapper(Model): - """A wrapper for the safety check object. Safety checks are performed by - service fabric before continuing with the operations. These checks ensure - the availability of the service and the reliability of the state. +class SafetyCheckWrapper(msrest.serialization.Model): + """A wrapper for the safety check object. Safety checks are performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. - :param safety_check: Represents a safety check performed by service fabric - before continuing with the operations. These checks ensure the - availability of the service and the reliability of the state. + :param safety_check: Represents a safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. :type safety_check: ~azure.servicefabric.models.SafetyCheck """ @@ -17668,24 +19340,24 @@ class SafetyCheckWrapper(Model): 'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SafetyCheckWrapper, self).__init__(**kwargs) self.safety_check = kwargs.get('safety_check', None) -class ScalingPolicyDescription(Model): +class ScalingPolicyDescription(msrest.serialization.Model): """Describes how the scaling should be performed. 
All required parameters must be populated in order to send to Azure. - :param scaling_trigger: Required. Specifies the trigger associated with - this scaling policy - :type scaling_trigger: - ~azure.servicefabric.models.ScalingTriggerDescription - :param scaling_mechanism: Required. Specifies the mechanism associated - with this scaling policy - :type scaling_mechanism: - ~azure.servicefabric.models.ScalingMechanismDescription + :param scaling_trigger: Required. Specifies the trigger associated with this scaling policy. + :type scaling_trigger: ~azure.servicefabric.models.ScalingTriggerDescription + :param scaling_mechanism: Required. Specifies the mechanism associated with this scaling + policy. + :type scaling_mechanism: ~azure.servicefabric.models.ScalingMechanismDescription """ _validation = { @@ -17698,49 +19370,47 @@ class ScalingPolicyDescription(Model): 'scaling_mechanism': {'key': 'ScalingMechanism', 'type': 'ScalingMechanismDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ScalingPolicyDescription, self).__init__(**kwargs) - self.scaling_trigger = kwargs.get('scaling_trigger', None) - self.scaling_mechanism = kwargs.get('scaling_mechanism', None) + self.scaling_trigger = kwargs['scaling_trigger'] + self.scaling_mechanism = kwargs['scaling_mechanism'] class SecondaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is - functioning in a ActiveSecondary role. + """Provides statistics about the Service Fabric Replicator, when it is functioning in a ActiveSecondary role. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecondaryActiveReplicatorStatus, - SecondaryIdleReplicatorStatus - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. Constant filled by server. 
- :type kind: str - :param replication_queue_status: Details about the replication queue on - the secondary replicator. - :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp - (UTC) at which a replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation - message was never received. - :type last_replication_operation_received_time_utc: datetime - :param is_in_build: Value that indicates whether the replica is currently - being built. - :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary + sub-classes are: SecondaryActiveReplicatorStatus, SecondaryIdleReplicatorStatus. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the secondary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a + replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation message was never + received. + :type last_replication_operation_received_time_utc: ~datetime.datetime + :param is_in_build: Value that indicates whether the replica is currently being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary replicator. 
:type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at - which a copy operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation - message was never received. - :type last_copy_operation_received_time_utc: datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at - which an acknowledgment was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment - message was never sent. - :type last_acknowledgement_sent_time_utc: datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy + operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation message was never + received. + :type last_copy_operation_received_time_utc: ~datetime.datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment + was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. 
+ :type last_acknowledgement_sent_time_utc: ~datetime.datetime """ _validation = { @@ -17761,50 +19431,49 @@ class SecondaryReplicatorStatus(ReplicatorStatus): 'kind': {'ActiveSecondary': 'SecondaryActiveReplicatorStatus', 'IdleSecondary': 'SecondaryIdleReplicatorStatus'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecondaryReplicatorStatus, self).__init__(**kwargs) + self.kind = 'SecondaryReplicatorStatus' # type: str self.replication_queue_status = kwargs.get('replication_queue_status', None) self.last_replication_operation_received_time_utc = kwargs.get('last_replication_operation_received_time_utc', None) self.is_in_build = kwargs.get('is_in_build', None) self.copy_queue_status = kwargs.get('copy_queue_status', None) self.last_copy_operation_received_time_utc = kwargs.get('last_copy_operation_received_time_utc', None) self.last_acknowledgement_sent_time_utc = kwargs.get('last_acknowledgement_sent_time_utc', None) - self.kind = 'SecondaryReplicatorStatus' class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in active mode and is part of - the replica set. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the secondary replicator. - :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp - (UTC) at which a replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation - message was never received. - :type last_replication_operation_received_time_utc: datetime - :param is_in_build: Value that indicates whether the replica is currently - being built. 
- :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary + """Status of the secondary replicator when it is in active mode and is part of the replica set. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the secondary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a + replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation message was never + received. + :type last_replication_operation_received_time_utc: ~datetime.datetime + :param is_in_build: Value that indicates whether the replica is currently being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at - which a copy operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation - message was never received. - :type last_copy_operation_received_time_utc: datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at - which an acknowledgment was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment - message was never sent. 
- :type last_acknowledgement_sent_time_utc: datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy + operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation message was never + received. + :type last_copy_operation_received_time_utc: ~datetime.datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment + was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. + :type last_acknowledgement_sent_time_utc: ~datetime.datetime """ _validation = { @@ -17821,44 +19490,43 @@ class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecondaryActiveReplicatorStatus, self).__init__(**kwargs) - self.kind = 'ActiveSecondary' + self.kind = 'ActiveSecondary' # type: str class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in idle mode and is being - built by the primary. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the secondary replicator. - :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp - (UTC) at which a replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation - message was never received. - :type last_replication_operation_received_time_utc: datetime - :param is_in_build: Value that indicates whether the replica is currently - being built. 
- :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary + """Status of the secondary replicator when it is in idle mode and is being built by the primary. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the secondary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a + replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation message was never + received. + :type last_replication_operation_received_time_utc: ~datetime.datetime + :param is_in_build: Value that indicates whether the replica is currently being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at - which a copy operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation - message was never received. - :type last_copy_operation_received_time_utc: datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at - which an acknowledgment was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment - message was never sent. 
- :type last_acknowledgement_sent_time_utc: datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy + operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation message was never + received. + :type last_copy_operation_received_time_utc: ~datetime.datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment + was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. + :type last_acknowledgement_sent_time_utc: ~datetime.datetime """ _validation = { @@ -17875,18 +19543,20 @@ class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecondaryIdleReplicatorStatus, self).__init__(**kwargs) - self.kind = 'IdleSecondary' + self.kind = 'IdleSecondary' # type: str -class SecretResourceDescription(Model): +class SecretResourceDescription(msrest.serialization.Model): """This type describes a secret resource. All required parameters must be populated in order to send to Azure. - :param properties: Required. Describes the properties of a secret - resource. + :param properties: Required. Describes the properties of a secret resource. :type properties: ~azure.servicefabric.models.SecretResourceProperties :param name: Required. Name of the Secret resource. 
:type name: str @@ -17902,13 +19572,16 @@ class SecretResourceDescription(Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecretResourceDescription, self).__init__(**kwargs) - self.properties = kwargs.get('properties', None) - self.name = kwargs.get('name', None) + self.properties = kwargs['properties'] + self.name = kwargs['name'] -class SecretValue(Model): +class SecretValue(msrest.serialization.Model): """This type represents the unencrypted value of the secret. :param value: The actual value of the secret. @@ -17919,12 +19592,15 @@ class SecretValue(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecretValue, self).__init__(**kwargs) self.value = kwargs.get('value', None) -class SecretValueProperties(Model): +class SecretValueProperties(msrest.serialization.Model): """This type describes properties of secret value resource. :param value: The actual value of the secret. @@ -17935,14 +19611,16 @@ class SecretValueProperties(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecretValueProperties, self).__init__(**kwargs) self.value = kwargs.get('value', None) -class SecretValueResourceDescription(Model): - """This type describes a value of a secret resource. The name of this resource - is the version identifier corresponding to this secret value. +class SecretValueResourceDescription(msrest.serialization.Model): + """This type describes a value of a secret resource. The name of this resource is the version identifier corresponding to this secret value. All required parameters must be populated in order to send to Azure. 
@@ -17961,20 +19639,44 @@ class SecretValueResourceDescription(Model): 'value': {'key': 'properties.value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SecretValueResourceDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.value = kwargs.get('value', None) +class SecretValueResourceProperties(SecretValueProperties): + """This type describes properties of a secret value resource. + + :param value: The actual value of the secret. + :type value: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SecretValueResourceProperties, self).__init__(**kwargs) + + class SeedNodeSafetyCheck(SafetyCheck): - """Represents a safety check for the seed nodes being performed by service - fabric before continuing with node level operations. + """Represents a safety check for the seed nodes being performed by service fabric before continuing with node level operations. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". 
+ :type kind: str or ~azure.servicefabric.models.SafetyCheckKind """ _validation = { @@ -17985,22 +19687,23 @@ class SeedNodeSafetyCheck(SafetyCheck): 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SeedNodeSafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsureSeedNodeQuorum' + self.kind = 'EnsureSeedNodeQuorum' # type: str -class SelectedPartition(Model): - """This class returns information about the partition that the user-induced - operation acted upon. +class SelectedPartition(msrest.serialization.Model): + """This class returns information about the partition that the user-induced operation acted upon. :param service_name: The name of the service the partition belongs to. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
:type partition_id: str """ @@ -18009,33 +19712,33 @@ class SelectedPartition(Model): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SelectedPartition, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) class ServiceBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric service - specifying what backup policy is being applied and suspend description, if - any. + """Backup configuration information for a specific Service Fabric service specifying what backup policy is being applied and suspend description, if any. All required parameters must be populated in order to send to Azure. - :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". 
+ :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. - :type kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str """ @@ -18044,17 +19747,20 @@ class ServiceBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceBackupConfigurationInfo, self).__init__(**kwargs) + self.kind = 'Service' # type: str self.service_name = kwargs.get('service_name', None) - self.kind = 'Service' class ServiceBackupEntity(BackupEntity): @@ -18062,10 +19768,11 @@ class ServiceBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param service_name: The full name of the service with 'fabric:' URI scheme. 
:type service_name: str """ @@ -18078,24 +19785,26 @@ class ServiceBackupEntity(BackupEntity): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceBackupEntity, self).__init__(**kwargs) + self.entity_kind = 'Service' # type: str self.service_name = kwargs.get('service_name', None) - self.entity_kind = 'Service' -class ServiceCorrelationDescription(Model): +class ServiceCorrelationDescription(msrest.serialization.Model): """Creates a particular correlation between services. All required parameters must be populated in order to send to Azure. - :param scheme: Required. The ServiceCorrelationScheme which describes the - relationship between this service and the service specified via - ServiceName. Possible values include: 'Invalid', 'Affinity', - 'AlignedAffinity', 'NonAlignedAffinity' + :param scheme: Required. The ServiceCorrelationScheme which describes the relationship between + this service and the service specified via ServiceName. Possible values include: "Invalid", + "Affinity", "AlignedAffinity", "NonAlignedAffinity". :type scheme: str or ~azure.servicefabric.models.ServiceCorrelationScheme - :param service_name: Required. The name of the service that the - correlation relationship is established with. + :param service_name: Required. The name of the service that the correlation relationship is + established with. :type service_name: str """ @@ -18109,67 +19818,91 @@ class ServiceCorrelationDescription(Model): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceCorrelationDescription, self).__init__(**kwargs) - self.scheme = kwargs.get('scheme', None) - self.service_name = kwargs.get('service_name', None) + self.scheme = kwargs['scheme'] + self.service_name = kwargs['service_name'] class ServiceEvent(FabricEvent): """Represents the base for all Service Events. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, - ServiceNewHealthReportEvent, ServiceHealthReportExpiredEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, ServiceHealthReportExpiredEvent, ServiceNewHealthReportEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. 
:type service_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent'} + 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceEvent, self).__init__(**kwargs) - self.service_id = kwargs.get('service_id', None) - self.kind = 'ServiceEvent' + self.kind = 'ServiceEvent' # type: str + self.service_id = kwargs['service_id'] class ServiceCreatedEvent(ServiceEvent): @@ -18177,25 +19910,44 @@ class ServiceCreatedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param service_type_name: Required. Service type name. :type service_type_name: str @@ -18215,18 +19967,17 @@ class ServiceCreatedEvent(ServiceEvent): :type min_replica_set_size: int :param service_package_version: Required. Version of Service package. :type service_package_version: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. 
An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -18241,11 +19992,11 @@ class ServiceCreatedEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -18259,19 +20010,22 @@ class ServiceCreatedEvent(ServiceEvent): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceCreatedEvent, self).__init__(**kwargs) - self.service_type_name = kwargs.get('service_type_name', None) - self.application_name = kwargs.get('application_name', None) - self.application_type_name = kwargs.get('application_type_name', None) - self.service_instance = kwargs.get('service_instance', None) - self.is_stateful = kwargs.get('is_stateful', None) - self.partition_count = kwargs.get('partition_count', None) - self.target_replica_set_size = kwargs.get('target_replica_set_size', None) - self.min_replica_set_size = kwargs.get('min_replica_set_size', None) - 
self.service_package_version = kwargs.get('service_package_version', None) - self.partition_id = kwargs.get('partition_id', None) - self.kind = 'ServiceCreated' + self.kind = 'ServiceCreated' # type: str + self.service_type_name = kwargs['service_type_name'] + self.application_name = kwargs['application_name'] + self.application_type_name = kwargs['application_type_name'] + self.service_instance = kwargs['service_instance'] + self.is_stateful = kwargs['is_stateful'] + self.partition_count = kwargs['partition_count'] + self.target_replica_set_size = kwargs['target_replica_set_size'] + self.min_replica_set_size = kwargs['min_replica_set_size'] + self.service_package_version = kwargs['service_package_version'] + self.partition_id = kwargs['partition_id'] class ServiceDeletedEvent(ServiceEvent): @@ -18279,25 +20033,44 @@ class ServiceDeletedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param service_type_name: Required. Service type name. 
:type service_type_name: str @@ -18320,9 +20093,9 @@ class ServiceDeletedEvent(ServiceEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -18336,11 +20109,11 @@ class ServiceDeletedEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -18353,90 +20126,87 @@ class ServiceDeletedEvent(ServiceEvent): 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceDeletedEvent, self).__init__(**kwargs) - self.service_type_name = kwargs.get('service_type_name', None) - self.application_name = kwargs.get('application_name', None) - self.application_type_name = kwargs.get('application_type_name', None) - self.service_instance = kwargs.get('service_instance', None) - self.is_stateful = kwargs.get('is_stateful', None) - self.partition_count = kwargs.get('partition_count', None) - self.target_replica_set_size = kwargs.get('target_replica_set_size', None) - self.min_replica_set_size = kwargs.get('min_replica_set_size', None) - self.service_package_version = kwargs.get('service_package_version', None) - self.kind = 'ServiceDeleted' + self.kind = 'ServiceDeleted' # type: str + self.service_type_name = kwargs['service_type_name'] + self.application_name = 
kwargs['application_name'] + self.application_type_name = kwargs['application_type_name'] + self.service_instance = kwargs['service_instance'] + self.is_stateful = kwargs['is_stateful'] + self.partition_count = kwargs['partition_count'] + self.target_replica_set_size = kwargs['target_replica_set_size'] + self.min_replica_set_size = kwargs['min_replica_set_size'] + self.service_package_version = kwargs['service_package_version'] -class ServiceDescription(Model): - """A ServiceDescription contains all of the information necessary to create a - service. +class ServiceDescription(msrest.serialization.Model): + """A ServiceDescription contains all of the information necessary to create a service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceDescription, StatelessServiceDescription + sub-classes are: StatefulServiceDescription, StatelessServiceDescription. All required parameters must be populated in order to send to Azure. - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. 
- Initialization data is passed to service instances or replicas when they - are created. + :param initialization_data: The initialization data as an array of bytes. Initialization data + is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an - object. - :type partition_description: - ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an object. + :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost - property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param tags_required_to_place: Tags for placement of this service. + :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :param tags_required_to_run: Tags for running of this service. 
+ :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription """ _validation = { + 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, - 'service_kind': {'required': True}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -18451,20 +20221,25 @@ class ServiceDescription(Model): 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, + 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceDescription', 'Stateless': 'StatelessServiceDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceDescription, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.application_name = kwargs.get('application_name', None) - self.service_name = kwargs.get('service_name', None) - self.service_type_name = kwargs.get('service_type_name', None) + self.service_name = kwargs['service_name'] + self.service_type_name = kwargs['service_type_name'] self.initialization_data = kwargs.get('initialization_data', None) - self.partition_description = kwargs.get('partition_description', None) + self.partition_description = kwargs['partition_description'] self.placement_constraints = kwargs.get('placement_constraints', None) self.correlation_scheme = kwargs.get('correlation_scheme', 
None) self.service_load_metrics = kwargs.get('service_load_metrics', None) @@ -18474,34 +20249,31 @@ def __init__(self, **kwargs): self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None) self.service_dns_name = kwargs.get('service_dns_name', None) self.scaling_policies = kwargs.get('scaling_policies', None) - self.service_kind = None + self.tags_required_to_place = kwargs.get('tags_required_to_place', None) + self.tags_required_to_run = kwargs.get('tags_required_to_run', None) -class ServiceFromTemplateDescription(Model): - """Defines description for creating a Service Fabric service from a template - defined in the application manifest. +class ServiceFromTemplateDescription(msrest.serialization.Model): + """Defines description for creating a Service Fabric service from a template defined in the application manifest. All required parameters must be populated in order to send to Azure. - :param application_name: Required. The name of the application, including - the 'fabric:' URI scheme. + :param application_name: Required. The name of the application, including the 'fabric:' URI + scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data for the newly created - service instance. + :param initialization_data: The initialization data for the newly created service instance. :type initialization_data: list[int] - :param service_package_activation_mode: The activation mode of service - package to be used for a service. 
Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str """ @@ -18520,11 +20292,14 @@ class ServiceFromTemplateDescription(Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceFromTemplateDescription, self).__init__(**kwargs) - self.application_name = kwargs.get('application_name', None) - self.service_name = kwargs.get('service_name', None) - self.service_type_name = kwargs.get('service_type_name', None) + self.application_name = kwargs['application_name'] + self.service_name = kwargs['service_name'] + self.service_type_name = kwargs['service_type_name'] self.initialization_data = kwargs.get('initialization_data', None) self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None) self.service_dns_name = kwargs.get('service_dns_name', None) @@ -18533,30 +20308,26 @@ def __init__(self, **kwargs): class ServiceHealth(EntityHealth): """Information about the health of a Service Fabric service. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the service whose health information is described - by this object. + :param name: The name of the service whose health information is described by this object. :type name: str - :param partition_health_states: The list of partition health states - associated with the service. - :type partition_health_states: - list[~azure.servicefabric.models.PartitionHealthState] + :param partition_health_states: The list of partition health states associated with the + service. 
+ :type partition_health_states: list[~azure.servicefabric.models.PartitionHealthState] """ _attribute_map = { @@ -18568,40 +20339,43 @@ class ServiceHealth(EntityHealth): 'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.partition_health_states = kwargs.get('partition_health_states', None) class ServiceHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a service, containing information about - the data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a service, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param service_name: Name of the service whose health evaluation is - described by this object. + :param service_name: Name of the service whose health evaluation is described by this object. :type service_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the service. The types of the - unhealthy evaluations can be PartitionsHealthEvaluation or - EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the service. The types of the unhealthy evaluations can be + PartitionsHealthEvaluation or EventHealthEvaluation. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -18609,18 +20383,21 @@ class ServiceHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Service' # type: str self.service_name = kwargs.get('service_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Service' class ServiceHealthReportExpiredEvent(ServiceEvent): @@ -18628,25 +20405,44 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param instance_id: Required. Id of Service instance. :type instance_id: long @@ -18662,17 +20458,16 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -18686,11 +20481,11 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -18703,32 +20498,31 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealthReportExpiredEvent, self).__init__(**kwargs) - self.instance_id = kwargs.get('instance_id', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'ServiceHealthReportExpired' + self.kind = 'ServiceHealthReportExpired' # type: str + self.instance_id = kwargs['instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + 
self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class ServiceHealthState(EntityHealthState): - """Represents the health state of a service, which contains the service - identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param service_name: Name of the service whose health state is represented - by this object. + """Represents the health state of a service, which contains the service identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_name: Name of the service whose health state is represented by this object. :type service_name: str """ @@ -18737,28 +20531,27 @@ class ServiceHealthState(EntityHealthState): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealthState, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) class ServiceHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a service, which contains the service - name, its aggregated health state and any partitions that respect the - filters in the cluster health chunk query description. 
+ """Represents the health state chunk of a service, which contains the service name, its aggregated health state and any partitions that respect the filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_name: The name of the service whose health state chunk is - provided in this object. + :param service_name: The name of the service whose health state chunk is provided in this + object. :type service_name: str - :param partition_health_state_chunks: The list of partition health state - chunks belonging to the service that respect the filters in the cluster - health chunk query description. - :type partition_health_state_chunks: - ~azure.servicefabric.models.PartitionHealthStateChunkList + :param partition_health_state_chunks: The list of partition health state chunks belonging to + the service that respect the filters in the cluster health chunk query description. 
+ :type partition_health_state_chunks: ~azure.servicefabric.models.PartitionHealthStateChunkList """ _attribute_map = { @@ -18767,18 +20560,20 @@ class ServiceHealthStateChunk(EntityHealthStateChunk): 'partition_health_state_chunks': {'key': 'PartitionHealthStateChunks', 'type': 'PartitionHealthStateChunkList'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealthStateChunk, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.partition_health_state_chunks = kwargs.get('partition_health_state_chunks', None) -class ServiceHealthStateChunkList(Model): - """The list of service health state chunks that respect the input filters in - the chunk query. Returned by get cluster health state chunks query. +class ServiceHealthStateChunkList(msrest.serialization.Model): + """The list of service health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param items: The list of service health state chunks that respect the - input filters in the chunk query. + :param items: The list of service health state chunks that respect the input filters in the + chunk query. :type items: list[~azure.servicefabric.models.ServiceHealthStateChunk] """ @@ -18786,67 +20581,58 @@ class ServiceHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[ServiceHealthStateChunk]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class ServiceHealthStateFilter(Model): - """Defines matching criteria to determine whether a service should be included - as a child of an application in the cluster health chunk. - The services are only returned if the parent application matches a filter - specified in the cluster health chunk query description. - One filter can match zero, one or multiple services, depending on its - properties. 
- - :param service_name_filter: The name of the service that matches the - filter. The filter is applied only to the specified service, if it exists. - If the service doesn't exist, no service is returned in the cluster health - chunk based on this filter. - If the service exists, it is included as the application's child if the - health state matches the other filter properties. - If not specified, all services that match the parent filters (if any) are - taken into consideration and matched against the other filter members, - like health state filter. +class ServiceHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a service should be included as a child of an application in the cluster health chunk. +The services are only returned if the parent application matches a filter specified in the cluster health chunk query description. +One filter can match zero, one or multiple services, depending on its properties. + + :param service_name_filter: The name of the service that matches the filter. The filter is + applied only to the specified service, if it exists. + If the service doesn't exist, no service is returned in the cluster health chunk based on this + filter. + If the service exists, it is included as the application's child if the health state matches + the other filter properties. + If not specified, all services that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type service_name_filter: str - :param health_state_filter: The filter for the health state of the - services. It allows selecting services if they match the desired health - states. - The possible values are integer value of one of the following health - states. Only services that match the filter are returned. All services are - used to evaluate the cluster aggregated health state. 
- If not specified, default value is None, unless the service name is - specified. If the filter has default value and service name is specified, - the matching service is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches services with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the services. It allows + selecting services if they match the desired health states. + The possible values are integer value of one of the following health states. Only services + that match the filter are returned. All services are used to evaluate the cluster aggregated + health state. + If not specified, default value is None, unless the service name is specified. If the filter + has default value and service name is specified, the matching service is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches services with HealthState value of OK (2) + and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. 
Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param partition_filters: Defines a list of filters that specify which - partitions to be included in the returned cluster health chunk as children - of the service. The partitions are returned only if the parent service - matches a filter. - If the list is empty, no partitions are returned. All the partitions are - used to evaluate the parent service aggregated health state, regardless of - the input filters. + :param partition_filters: Defines a list of filters that specify which partitions to be + included in the returned cluster health chunk as children of the service. The partitions are + returned only if the parent service matches a filter. + If the list is empty, no partitions are returned. All the partitions are used to evaluate the + parent service aggregated health state, regardless of the input filters. The service filter may specify multiple partition filters. - For example, it can specify a filter to return all partitions with health - state Error and another filter to always include a partition identified by - its partition ID. - :type partition_filters: - list[~azure.servicefabric.models.PartitionHealthStateFilter] + For example, it can specify a filter to return all partitions with health state Error and + another filter to always include a partition identified by its partition ID. 
+ :type partition_filters: list[~azure.servicefabric.models.PartitionHealthStateFilter] """ _attribute_map = { @@ -18855,14 +20641,17 @@ class ServiceHealthStateFilter(Model): 'partition_filters': {'key': 'PartitionFilters', 'type': '[PartitionHealthStateFilter]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceHealthStateFilter, self).__init__(**kwargs) self.service_name_filter = kwargs.get('service_name_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) self.partition_filters = kwargs.get('partition_filters', None) -class ServiceIdentity(Model): +class ServiceIdentity(msrest.serialization.Model): """Map service identity friendly name to an application identity. :param name: The identity friendly name. @@ -18876,47 +20665,48 @@ class ServiceIdentity(Model): 'identity_ref': {'key': 'identityRef', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceIdentity, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.identity_ref = kwargs.get('identity_ref', None) -class ServiceInfo(Model): +class ServiceInfo(msrest.serialization.Model): """Information about a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceInfo, StatelessServiceInfo + sub-classes are: StatefulServiceInfo, StatelessServiceInfo. All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. 
This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service - manifest. + :param type_name: Name of the service type as specified in the service manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values - include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', - 'Failed' + :param service_status: The status of the application. Possible values include: "Unknown", + "Active", "Upgrading", "Deleting", "Creating", "Failed". :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Required. 
Constant filled by server. - :type service_kind: str """ _validation = { @@ -18925,55 +20715,54 @@ class ServiceInfo(Model): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceInfo', 'Stateless': 'StatelessServiceInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) + self.service_kind = None # type: Optional[str] self.name = kwargs.get('name', None) self.type_name = kwargs.get('type_name', None) self.manifest_version = kwargs.get('manifest_version', None) self.health_state = kwargs.get('health_state', None) self.service_status = kwargs.get('service_status', None) self.is_service_group = kwargs.get('is_service_group', None) - self.service_kind = None -class ServiceLoadMetricDescription(Model): +class ServiceLoadMetricDescription(msrest.serialization.Model): """Specifies a metric to load balance a service during runtime. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the metric. If the service chooses to - report load during runtime, the load metric name should match the name - that is specified in Name exactly. Note that metric names are - case-sensitive. + :param name: Required. The name of the metric. If the service chooses to report load during + runtime, the load metric name should match the name that is specified in Name exactly. Note + that metric names are case-sensitive. 
:type name: str - :param weight: The service load metric relative weight, compared to other - metrics configured for this service, as a number. Possible values include: - 'Zero', 'Low', 'Medium', 'High' + :param weight: The service load metric relative weight, compared to other metrics configured + for this service, as a number. Possible values include: "Zero", "Low", "Medium", "High". :type weight: str or ~azure.servicefabric.models.ServiceLoadMetricWeight - :param primary_default_load: Used only for Stateful services. The default - amount of load, as a number, that this service creates for this metric - when it is a Primary replica. + :param primary_default_load: Used only for Stateful services. The default amount of load, as a + number, that this service creates for this metric when it is a Primary replica. :type primary_default_load: int - :param secondary_default_load: Used only for Stateful services. The - default amount of load, as a number, that this service creates for this - metric when it is a Secondary replica. + :param secondary_default_load: Used only for Stateful services. The default amount of load, as + a number, that this service creates for this metric when it is a Secondary replica. :type secondary_default_load: int - :param default_load: Used only for Stateless services. The default amount - of load, as a number, that this service creates for this metric. + :param default_load: Used only for Stateless services. The default amount of load, as a number, + that this service creates for this metric. 
:type default_load: int """ @@ -18989,25 +20778,27 @@ class ServiceLoadMetricDescription(Model): 'default_load': {'key': 'DefaultLoad', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceLoadMetricDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.weight = kwargs.get('weight', None) self.primary_default_load = kwargs.get('primary_default_load', None) self.secondary_default_load = kwargs.get('secondary_default_load', None) self.default_load = kwargs.get('default_load', None) -class ServiceNameInfo(Model): +class ServiceNameInfo(msrest.serialization.Model): """Information about the service name. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str @@ -19018,7 +20809,10 @@ class ServiceNameInfo(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceNameInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -19029,25 +20823,44 @@ class ServiceNewHealthReportEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", 
"ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param instance_id: Required. Id of Service instance. 
:type instance_id: long @@ -19063,17 +20876,16 @@ class ServiceNewHealthReportEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -19087,11 +20899,11 @@ class ServiceNewHealthReportEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -19104,44 +20916,44 @@ class ServiceNewHealthReportEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceNewHealthReportEvent, self).__init__(**kwargs) - self.instance_id = kwargs.get('instance_id', None) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = 
kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'ServiceNewHealthReport' - - -class ServicePartitionInfo(Model): + self.kind = 'ServiceNewHealthReport' # type: str + self.instance_id = kwargs['instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] + + +class ServicePartitionInfo(msrest.serialization.Model): """Information about a partition of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServicePartitionInfo, - StatelessServicePartitionInfo + sub-classes are: StatefulServicePartitionInfo, StatelessServicePartitionInfo. All required parameters must be populated in order to send to Azure. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service - partition. 
Possible values include: 'Invalid', 'Ready', 'NotReady', - 'InQuorumLoss', 'Reconfiguring', 'Deleting' - :type partition_status: str or - ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, - partitioning scheme and keys supported by it. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :param partition_status: The status of the service fabric service partition. Possible values + include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". + :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, partitioning scheme and + keys supported by it. + :type partition_information: ~azure.servicefabric.models.PartitionInformation """ _validation = { @@ -19149,38 +20961,40 @@ class ServicePartitionInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServicePartitionInfo', 'Stateless': 'StatelessServicePartitionInfo'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePartitionInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.health_state = kwargs.get('health_state', None) self.partition_status = kwargs.get('partition_status', None) self.partition_information = kwargs.get('partition_information', None) - self.service_kind = None -class ServicePlacementPolicyDescription(Model): +class 
ServicePlacementPolicyDescription(msrest.serialization.Model): """Describes the policy to be used for placement of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServicePlacementInvalidDomainPolicyDescription, - ServicePlacementNonPartiallyPlaceServicePolicyDescription, - ServicePlacementPreferPrimaryDomainPolicyDescription, - ServicePlacementRequiredDomainPolicyDescription, - ServicePlacementRequireDomainDistributionPolicyDescription + sub-classes are: ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, ServicePlacementInvalidDomainPolicyDescription, ServicePlacementNonPartiallyPlaceServicePolicyDescription, ServicePlacementPreferPrimaryDomainPolicyDescription, ServicePlacementRequiredDomainPolicyDescription, ServicePlacementRequireDomainDistributionPolicyDescription. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". 
+ :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType """ _validation = { @@ -19192,25 +21006,61 @@ class ServicePlacementPolicyDescription(Model): } _subtype_map = { - 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} + 'type': {'AllowMultipleStatelessInstancesOnNode': 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePlacementPolicyDescription, self).__init__(**kwargs) - self.type = None + self.type = None # type: Optional[str] + + +class ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service allowing multiple stateless instances of a partition of the service to be placed on a node. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. 
Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: Holdover from other policy descriptions, not used for this policy, values + are ignored by runtime. Keeping it for any backwards-compatibility with clients. + :type domain_name: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, self).__init__(**kwargs) + self.type = 'AllowMultipleStatelessInstancesOnNode' # type: str + self.domain_name = kwargs.get('domain_name', None) class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where a particular fault or upgrade domain should not be used for placement - of the instances or replicas of that service. + """Describes the policy to be used for placement of a Service Fabric service where a particular fault or upgrade domain should not be used for placement of the instances or replicas of that service. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should not be used for - placement. + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". 
+ :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should not be used for placement. :type domain_name: str """ @@ -19223,21 +21073,25 @@ class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescr 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePlacementInvalidDomainPolicyDescription, self).__init__(**kwargs) + self.type = 'InvalidDomain' # type: str self.domain_name = kwargs.get('domain_name', None) - self.type = 'InvalidDomain' class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where all replicas must be able to be placed in order for any replicas to - be created. + """Describes the policy to be used for placement of a Service Fabric service where all replicas must be able to be placed in order for any replicas to be created. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". 
+ :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType """ _validation = { @@ -19248,29 +21102,27 @@ class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacement 'type': {'key': 'Type', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__(**kwargs) - self.type = 'NonPartiallyPlaceService' + self.type = 'NonPartiallyPlaceService' # type: str class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where the service's Primary replicas should optimally be placed in a - particular domain. - This placement policy is usually used with fault domains in scenarios where - the Service Fabric cluster is geographically distributed in order to - indicate that a service's primary replica should be located in a particular - fault domain, which in geo-distributed scenarios usually aligns with - regional or datacenter boundaries. Note that since this is an optimization - it is possible that the Primary replica may not end up located in this - domain due to failures, capacity limits, or other constraints. + """Describes the policy to be used for placement of a Service Fabric service where the service's Primary replicas should optimally be placed in a particular domain. + +This placement policy is usually used with fault domains in scenarios where the Service Fabric cluster is geographically distributed in order to indicate that a service's primary replica should be located in a particular fault domain, which in geo-distributed scenarios usually aligns with regional or datacenter boundaries. Note that since this is an optimization it is possible that the Primary replica may not end up located in this domain due to failures, capacity limits, or other constraints. 
All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should used for placement - as per this policy. + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should used for placement as per this policy. :type domain_name: str """ @@ -19283,23 +21135,26 @@ class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolic 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__(**kwargs) + self.type = 'PreferPrimaryDomain' # type: str self.domain_name = kwargs.get('domain_name', None) - self.type = 'PreferPrimaryDomain' class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where the instances or replicas of that service must be placed in a - particular domain. + """Describes the policy to be used for placement of a Service Fabric service where the instances or replicas of that service must be placed in a particular domain. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should used for placement - as per this policy. + :param type: Required. The type of placement policy for a service fabric service. 
Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should used for placement as per this policy. :type domain_name: str """ @@ -19312,31 +21167,28 @@ class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDesc 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePlacementRequiredDomainPolicyDescription, self).__init__(**kwargs) + self.type = 'RequireDomain' # type: str self.domain_name = kwargs.get('domain_name', None) - self.type = 'RequireDomain' class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where two replicas from the same partition should never be placed in the - same fault or upgrade domain. - While this is not common it can expose the service to an increased risk of - concurrent failures due to unplanned outages or other cases of - subsequent/concurrent failures. As an example, consider a case where - replicas are deployed across different data center, with one replica per - location. In the event that one of the datacenters goes offline, normally - the replica that was placed in that datacenter will be packed into one of - the remaining datacenters. If this is not desirable then this policy should - be set. + """Describes the policy to be used for placement of a Service Fabric service where two replicas from the same partition should never be placed in the same fault or upgrade domain. 
+ +While this is not common it can expose the service to an increased risk of concurrent failures due to unplanned outages or other cases of subsequent/concurrent failures. As an example, consider a case where replicas are deployed across different data center, with one replica per location. In the event that one of the datacenters goes offline, normally the replica that was placed in that datacenter will be packed into one of the remaining datacenters. If this is not desirable then this policy should be set. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should used for placement - as per this policy. + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should used for placement as per this policy. :type domain_name: str """ @@ -19349,40 +21201,40 @@ class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacemen 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__(**kwargs) + self.type = 'RequireDomainDistribution' # type: str self.domain_name = kwargs.get('domain_name', None) - self.type = 'RequireDomainDistribution' -class ServiceProperties(Model): +class ServiceProperties(msrest.serialization.Model): """Describes properties of a service resource. - Variables are only populated by the server, and will be ignored when - sending a request. 
+ Variables are only populated by the server, and will be ignored when sending a request. :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. - Defaults to 1 if not specified. + :param replica_count: The number of replicas of the service to create. Defaults to 1 if not + specified. :type replica_count: int - :param execution_policy: The execution policy of the service + :param execution_policy: The execution policy of the service. :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies - :type auto_scaling_policies: - list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :param auto_scaling_policies: Auto scaling policies. + :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the service. + :ivar status_details: Gives additional information about the current status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". 
:vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', - this additional details from service fabric Health Manager for the user to - know why the service is marked unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the service is marked + unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. :type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -19410,7 +21262,10 @@ class ServiceProperties(Model): 'dns_name': {'key': 'dnsName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceProperties, self).__init__(**kwargs) self.description = kwargs.get('description', None) self.replica_count = kwargs.get('replica_count', None) @@ -19424,22 +21279,19 @@ def __init__(self, **kwargs): self.dns_name = kwargs.get('dns_name', None) -class ServiceReplicaProperties(Model): +class ServiceReplicaProperties(msrest.serialization.Model): """Describes the properties of a service replica. All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in - service. Possible values include: 'Linux', 'Windows' + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that - forms the service. A code package describes the container and the - properties for running it. All the code packages are started together on - the same host and share the same context (network, process etc.). 
- :type code_packages: - list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service - needs to be part of. + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -19457,10 +21309,13 @@ class ServiceReplicaProperties(Model): 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceReplicaProperties, self).__init__(**kwargs) - self.os_type = kwargs.get('os_type', None) - self.code_packages = kwargs.get('code_packages', None) + self.os_type = kwargs['os_type'] + self.code_packages = kwargs['code_packages'] self.network_refs = kwargs.get('network_refs', None) self.diagnostics = kwargs.get('diagnostics', None) @@ -19470,17 +21325,14 @@ class ServiceReplicaDescription(ServiceReplicaProperties): All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in - service. Possible values include: 'Linux', 'Windows' + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that - forms the service. 
A code package describes the container and the - properties for running it. All the code packages are started together on - the same host and share the same context (network, process etc.). - :type code_packages: - list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service - needs to be part of. + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -19502,57 +21354,54 @@ class ServiceReplicaDescription(ServiceReplicaProperties): 'replica_name': {'key': 'replicaName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceReplicaDescription, self).__init__(**kwargs) - self.replica_name = kwargs.get('replica_name', None) + self.replica_name = kwargs['replica_name'] -class ServiceResourceDescription(Model): +class ServiceResourceDescription(msrest.serialization.Model): """This type describes a service resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the Service resource. :type name: str - :param os_type: Required. The operation system required by the code in - service. 
Possible values include: 'Linux', 'Windows' + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that - forms the service. A code package describes the container and the - properties for running it. All the code packages are started together on - the same host and share the same context (network, process etc.). - :type code_packages: - list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service - needs to be part of. + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. - Defaults to 1 if not specified. + :param replica_count: The number of replicas of the service to create. Defaults to 1 if not + specified. :type replica_count: int - :param execution_policy: The execution policy of the service + :param execution_policy: The execution policy of the service. 
:type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies - :type auto_scaling_policies: - list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :param auto_scaling_policies: Auto scaling policies. + :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the service. + :ivar status_details: Gives additional information about the current status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', - this additional details from service fabric Health Manager for the user to - know why the service is marked unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the service is marked + unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. 
:type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -19588,11 +21437,14 @@ class ServiceResourceDescription(Model): 'dns_name': {'key': 'properties.dnsName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceResourceDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.os_type = kwargs.get('os_type', None) - self.code_packages = kwargs.get('code_packages', None) + self.name = kwargs['name'] + self.os_type = kwargs['os_type'] + self.code_packages = kwargs['code_packages'] self.network_refs = kwargs.get('network_refs', None) self.diagnostics = kwargs.get('diagnostics', None) self.description = kwargs.get('description', None) @@ -19607,39 +21459,131 @@ def __init__(self, **kwargs): self.dns_name = kwargs.get('dns_name', None) +class ServiceResourceProperties(ServiceReplicaProperties, ServiceProperties): + """This type describes properties of a service resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :param description: User readable description of the service. + :type description: str + :param replica_count: The number of replicas of the service to create. Defaults to 1 if not + specified. + :type replica_count: int + :param execution_policy: The execution policy of the service. + :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy + :param auto_scaling_policies: Auto scaling policies. + :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". + :vartype status: str or ~azure.servicefabric.models.ResourceStatus + :ivar status_details: Gives additional information about the current status of the service. 
+ :vartype status_details: str + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :vartype health_state: str or ~azure.servicefabric.models.HealthState + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the service is marked + unhealthy. + :vartype unhealthy_evaluation: str + :param identity_refs: The service identity list. + :type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] + :param dns_name: Dns name of the service. + :type dns_name: str + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". + :type os_type: str or ~azure.servicefabric.models.OperatingSystemType + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. + :type network_refs: list[~azure.servicefabric.models.NetworkRef] + :param diagnostics: Reference to sinks in DiagnosticsDescription. 
+ :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef + """ + + _validation = { + 'status': {'readonly': True}, + 'status_details': {'readonly': True}, + 'health_state': {'readonly': True}, + 'unhealthy_evaluation': {'readonly': True}, + 'os_type': {'required': True}, + 'code_packages': {'required': True}, + } + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'replica_count': {'key': 'replicaCount', 'type': 'int'}, + 'execution_policy': {'key': 'executionPolicy', 'type': 'ExecutionPolicy'}, + 'auto_scaling_policies': {'key': 'autoScalingPolicies', 'type': '[AutoScalingPolicy]'}, + 'status': {'key': 'status', 'type': 'str'}, + 'status_details': {'key': 'statusDetails', 'type': 'str'}, + 'health_state': {'key': 'healthState', 'type': 'str'}, + 'unhealthy_evaluation': {'key': 'unhealthyEvaluation', 'type': 'str'}, + 'identity_refs': {'key': 'identityRefs', 'type': '[ServiceIdentity]'}, + 'dns_name': {'key': 'dnsName', 'type': 'str'}, + 'os_type': {'key': 'osType', 'type': 'str'}, + 'code_packages': {'key': 'codePackages', 'type': '[ContainerCodePackageProperties]'}, + 'network_refs': {'key': 'networkRefs', 'type': '[NetworkRef]'}, + 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, + } + + def __init__( + self, + **kwargs + ): + super(ServiceResourceProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.replica_count = kwargs.get('replica_count', None) + self.execution_policy = kwargs.get('execution_policy', None) + self.auto_scaling_policies = kwargs.get('auto_scaling_policies', None) + self.status = None + self.status_details = None + self.health_state = None + self.unhealthy_evaluation = None + self.identity_refs = kwargs.get('identity_refs', None) + self.dns_name = kwargs.get('dns_name', None) + self.os_type = kwargs['os_type'] + self.code_packages = kwargs['code_packages'] + self.network_refs = kwargs.get('network_refs', None) + self.diagnostics = 
kwargs.get('diagnostics', None) + + class ServicesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for services of a certain service type - belonging to an application, containing health evaluations for each - unhealthy service that impacted current aggregated health state. Can be - returned when evaluating application health and the aggregated health state - is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for services of a certain service type belonging to an application, containing health evaluations for each unhealthy service that impacted current aggregated health state. Can be returned when evaluating application health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param service_type_name: Name of the service type of the services. :type service_type_name: str - :param max_percent_unhealthy_services: Maximum allowed percentage of - unhealthy services from the ServiceTypeHealthPolicy. + :param max_percent_unhealthy_services: Maximum allowed percentage of unhealthy services from + the ServiceTypeHealthPolicy. :type max_percent_unhealthy_services: int - :param total_count: Total number of services of the current service type - in the application from the health store. + :param total_count: Total number of services of the current service type in the application + from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ServiceHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy ServiceHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -19647,57 +21591,55 @@ class ServicesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServicesHealthEvaluation, self).__init__(**kwargs) + self.kind = 'Services' # type: str self.service_type_name = kwargs.get('service_type_name', None) self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'Services' -class ServiceTypeDescription(Model): - """Describes a service type defined in the service manifest of a provisioned - application type. The properties the ones defined in the service manifest. +class ServiceTypeDescription(msrest.serialization.Model): + """Describes a service type defined in the service manifest of a provisioned application type. The properties are the ones defined in the service manifest. You probably want to use the sub-classes and not this class directly.
Known - sub-classes are: StatefulServiceTypeDescription, - StatelessServiceTypeDescription + sub-classes are: StatefulServiceTypeDescription, StatelessServiceTypeDescription. All required parameters must be populated in order to send to Azure. - :param is_stateful: Indicates whether the service type is a stateful - service type or a stateless service type. This property is true if the - service type is a stateful service type, false otherwise. + :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. + Possible values include: "Invalid", "Stateless", "Stateful". + :type kind: str or ~azure.servicefabric.models.ServiceKind + :param is_stateful: Indicates whether the service type is a stateful service type or a + stateless service type. This property is true if the service type is a stateful service type, + false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when - instantiating this service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when instantiating this + service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy - descriptions. + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy descriptions. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: - list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Required. Constant filled by server. - :type kind: str + :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] """ _validation = { @@ -19705,31 +21647,34 @@ class ServiceTypeDescription(Model): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'Stateful': 'StatefulServiceTypeDescription', 'Stateless': 'StatelessServiceTypeDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceTypeDescription, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.is_stateful = kwargs.get('is_stateful', None) self.service_type_name = kwargs.get('service_type_name', None) self.placement_constraints = kwargs.get('placement_constraints', None) self.load_metrics = kwargs.get('load_metrics', None) self.service_placement_policies = kwargs.get('service_placement_policies', None) self.extensions = kwargs.get('extensions', None) - self.kind = None -class ServiceTypeExtensionDescription(Model): +class ServiceTypeExtensionDescription(msrest.serialization.Model): """Describes extension of a service type defined in the service manifest. :param key: The name of the extension. 
@@ -19743,51 +21688,53 @@ class ServiceTypeExtensionDescription(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceTypeExtensionDescription, self).__init__(**kwargs) self.key = kwargs.get('key', None) self.value = kwargs.get('value', None) -class ServiceTypeHealthPolicy(Model): - """Represents the health policy used to evaluate the health of services - belonging to a service type. - - :param max_percent_unhealthy_partitions_per_service: The maximum allowed - percentage of unhealthy partitions per service. Allowed values are Byte - values from zero to 100 - The percentage represents the maximum tolerated percentage of partitions - that can be unhealthy before the service is considered in error. - If the percentage is respected but there is at least one unhealthy - partition, the health is evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy - partitions over the total number of partitions in the service. - The computation rounds up to tolerate one failure on small numbers of - partitions. Default percentage is zero. Default value: 0 . +class ServiceTypeHealthPolicy(msrest.serialization.Model): + """Represents the health policy used to evaluate the health of services belonging to a service type. + + :param max_percent_unhealthy_partitions_per_service: The maximum allowed percentage of + unhealthy partitions per service. Allowed values are Byte values from zero to 100 + + The percentage represents the maximum tolerated percentage of partitions that can be unhealthy + before the service is considered in error. + If the percentage is respected but there is at least one unhealthy partition, the health is + evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy partitions over the total + number of partitions in the service. + The computation rounds up to tolerate one failure on small numbers of partitions. 
Default + percentage is zero. :type max_percent_unhealthy_partitions_per_service: int - :param max_percent_unhealthy_replicas_per_partition: The maximum allowed - percentage of unhealthy replicas per partition. Allowed values are Byte - values from zero to 100. - The percentage represents the maximum tolerated percentage of replicas - that can be unhealthy before the partition is considered in error. - If the percentage is respected but there is at least one unhealthy - replica, the health is evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy replicas - over the total number of replicas in the partition. - The computation rounds up to tolerate one failure on small numbers of - replicas. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_replicas_per_partition: The maximum allowed percentage of + unhealthy replicas per partition. Allowed values are Byte values from zero to 100. + + The percentage represents the maximum tolerated percentage of replicas that can be unhealthy + before the partition is considered in error. + If the percentage is respected but there is at least one unhealthy replica, the health is + evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy replicas over the total + number of replicas in the partition. + The computation rounds up to tolerate one failure on small numbers of replicas. Default + percentage is zero. :type max_percent_unhealthy_replicas_per_partition: int - :param max_percent_unhealthy_services: The maximum allowed percentage of - unhealthy services. Allowed values are Byte values from zero to 100. - The percentage represents the maximum tolerated percentage of services - that can be unhealthy before the application is considered in error. - If the percentage is respected but there is at least one unhealthy - service, the health is evaluated as Warning. 
- This is calculated by dividing the number of unhealthy services of the - specific service type over the total number of services of the specific - service type. - The computation rounds up to tolerate one failure on small numbers of - services. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_services: The maximum allowed percentage of unhealthy services. + Allowed values are Byte values from zero to 100. + + The percentage represents the maximum tolerated percentage of services that can be unhealthy + before the application is considered in error. + If the percentage is respected but there is at least one unhealthy service, the health is + evaluated as Warning. + This is calculated by dividing the number of unhealthy services of the specific service type + over the total number of services of the specific service type. + The computation rounds up to tolerate one failure on small numbers of services. Default + percentage is zero. :type max_percent_unhealthy_services: int """ @@ -19797,23 +21744,26 @@ class ServiceTypeHealthPolicy(Model): 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceTypeHealthPolicy, self).__init__(**kwargs) self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', 0) self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', 0) self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0) -class ServiceTypeHealthPolicyMapItem(Model): +class ServiceTypeHealthPolicyMapItem(msrest.serialization.Model): """Defines an item in ServiceTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the service type health policy map item. - This is the name of the service type. + :param key: Required. 
The key of the service type health policy map item. This is the name of + the service type. :type key: str - :param value: Required. The value of the service type health policy map - item. This is the ServiceTypeHealthPolicy for this service type. + :param value: Required. The value of the service type health policy map item. This is the + ServiceTypeHealthPolicy for this service type. :type value: ~azure.servicefabric.models.ServiceTypeHealthPolicy """ _validation = { @@ -19827,29 +21777,29 @@ class ServiceTypeHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'ServiceTypeHealthPolicy'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceTypeHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) + self.key = kwargs['key'] + self.value = kwargs['value'] -class ServiceTypeInfo(Model): - """Information about a service type that is defined in a service manifest of a - provisioned application type. +class ServiceTypeInfo(msrest.serialization.Model): + """Information about a service type that is defined in a service manifest of a provisioned application type. - :param service_type_description: Describes a service type defined in the - service manifest of a provisioned application type. The properties the - ones defined in the service manifest. - :type service_type_description: - ~azure.servicefabric.models.ServiceTypeDescription - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_type_description: Describes a service type defined in the service manifest of a + provisioned application type. The properties are the ones defined in the service manifest. + :type service_type_description: ~azure.servicefabric.models.ServiceTypeDescription + :param service_manifest_name: The name of the service manifest in which this service type is + defined.
:type service_manifest_name: str - :param service_manifest_version: The version of the service manifest in - which this service type is defined. + :param service_manifest_version: The version of the service manifest in which this service type + is defined. :type service_manifest_version: str - :param is_service_group: Indicates whether the service is a service group. - If it is, the property value is true otherwise false. + :param is_service_group: Indicates whether the service is a service group. If it is, the + property value is true otherwise false. :type is_service_group: bool """ @@ -19860,7 +21810,10 @@ class ServiceTypeInfo(Model): 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceTypeInfo, self).__init__(**kwargs) self.service_type_description = kwargs.get('service_type_description', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -19868,9 +21821,8 @@ def __init__(self, **kwargs): self.is_service_group = kwargs.get('is_service_group', None) -class ServiceTypeManifest(Model): - """Contains the manifest describing a service type registered as part of an - application in a Service Fabric cluster. +class ServiceTypeManifest(msrest.serialization.Model): + """Contains the manifest describing a service type registered as part of an application in a Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -19880,87 +21832,89 @@ class ServiceTypeManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceTypeManifest, self).__init__(**kwargs) self.manifest = kwargs.get('manifest', None) -class ServiceUpdateDescription(Model): - """A ServiceUpdateDescription contains all of the information necessary to - update a service. 
+class ServiceUpdateDescription(msrest.serialization.Model): + """A ServiceUpdateDescription contains all of the information necessary to update a service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceUpdateDescription, - StatelessServiceUpdateDescription - - All required parameters must be populated in order to send to Azure. - - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. - - None - Does not indicate any other properties are set. The value is - zero. - - TargetReplicaSetSize/InstanceCount - Indicates whether the - TargetReplicaSetSize property (for Stateful services) or the InstanceCount - property (for Stateless services) is set. The value is 1. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 2. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 4. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 8. - - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The - value is 16. - - PlacementConstraints - Indicates the PlacementConstraints property is - set. The value is 32. - - PlacementPolicyList - Indicates the ServicePlacementPolicies property is - set. The value is 64. - - Correlation - Indicates the CorrelationScheme property is set. The value - is 128. - - Metrics - Indicates the ServiceLoadMetrics property is set. The value is - 256. - - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The - value is 512. 
- - ScalingPolicy - Indicates the ScalingPolicies property is set. The value - is 1024. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 2048. - - MinInstanceCount - Indicates the MinInstanceCount property is set. The - value is 4096. - - MinInstancePercentage - Indicates the MinInstancePercentage property is - set. The value is 8192. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 16384. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 32768. + sub-classes are: StatefulServiceUpdateDescription, StatelessServiceUpdateDescription. + + All required parameters must be populated in order to send to Azure. + + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and + QuorumLossWaitDuration (4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. + * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property + (for Stateful services) or the InstanceCount property (for Stateless services) is set. The + value is 1. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 2. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 4. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. 
The + value is 8. + * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. + * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. + * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is + 64. + * Correlation - Indicates the CorrelationScheme property is set. The value is 128. + * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. + * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 2048. + * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. + * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is + 8192. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 16384. + * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 32768. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 65536. + * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. + * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. + * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. 
Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_dns_name: The DNS name of the service. + :type service_dns_name: str + :param tags_for_placement: Tags for placement of this service. + :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription + :param tags_for_running: Tags for running of this service. 
+ :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription """ _validation = { @@ -19968,6 +21922,7 @@ class ServiceUpdateDescription(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -19975,15 +21930,21 @@ class ServiceUpdateDescription(Model): 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, + 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceUpdateDescription', 'Stateless': 'StatelessServiceUpdateDescription'} } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceUpdateDescription, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.flags = kwargs.get('flags', None) self.placement_constraints = kwargs.get('placement_constraints', None) self.correlation_scheme = kwargs.get('correlation_scheme', None) @@ -19991,20 +21952,21 @@ def __init__(self, **kwargs): self.service_placement_policies = kwargs.get('service_placement_policies', None) self.default_move_cost = kwargs.get('default_move_cost', None) self.scaling_policies = kwargs.get('scaling_policies', None) - self.service_kind = None + self.service_dns_name = kwargs.get('service_dns_name', None) + self.tags_for_placement = kwargs.get('tags_for_placement', None) + self.tags_for_running = 
kwargs.get('tags_for_running', None) -class ServiceUpgradeProgress(Model): - """Information about how many replicas are completed or pending for a specific - service during upgrade. +class ServiceUpgradeProgress(msrest.serialization.Model): + """Information about how many replicas are completed or pending for a specific service during upgrade. :param service_name: Name of the Service resource. :type service_name: str - :param completed_replica_count: The number of replicas that completes the - upgrade in the service. + :param completed_replica_count: The number of replicas that completes the upgrade in the + service. :type completed_replica_count: str - :param pending_replica_count: The number of replicas that are waiting to - be upgraded in the service. + :param pending_replica_count: The number of replicas that are waiting to be upgraded in the + service. :type pending_replica_count: str """ @@ -20014,26 +21976,25 @@ class ServiceUpgradeProgress(Model): 'pending_replica_count': {'key': 'PendingReplicaCount', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ServiceUpgradeProgress, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.completed_replica_count = kwargs.get('completed_replica_count', None) self.pending_replica_count = kwargs.get('pending_replica_count', None) -class Setting(Model): - """Describes a setting for the container. The setting file path can be fetched - from environment variable "Fabric_SettingPath". The path for Windows - container is "C:\\secrets". The path for Linux container is "/var/secrets". +class Setting(msrest.serialization.Model): + """Describes a setting for the container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". The path for Linux container is "/var/secrets". - :param type: The type of the setting being given in value. 
Possible values - include: 'ClearText', 'KeyVaultReference', 'SecretValueReference'. Default - value: "ClearText" . + :param type: The type of the setting being given in value. Possible values include: + "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". :type type: str or ~azure.servicefabric.models.SettingType :param name: The name of the setting. :type name: str - :param value: The value of the setting, will be processed based on the - type provided. + :param value: The value of the setting, will be processed based on the type provided. :type value: str """ @@ -20043,7 +22004,10 @@ class Setting(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(Setting, self).__init__(**kwargs) self.type = kwargs.get('type', "ClearText") self.name = kwargs.get('name', None) @@ -20051,20 +22015,19 @@ def __init__(self, **kwargs): class SingletonPartitionInformation(PartitionInformation): - """Information about a partition that is singleton. The services with - singleton partitioning scheme are effectively non-partitioned. They only - have one partition. + """Information about a partition that is singleton. The services with singleton partitioning scheme are effectively non-partitioned. They only have one partition. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". 
+ :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str """ _validation = { @@ -20072,23 +22035,26 @@ class SingletonPartitionInformation(PartitionInformation): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SingletonPartitionInformation, self).__init__(**kwargs) - self.service_partition_kind = 'Singleton' + self.service_partition_kind = 'Singleton' # type: str class SingletonPartitionSchemeDescription(PartitionSchemeDescription): - """Describes the partition scheme of a singleton-partitioned, or - non-partitioned service. + """Describes the partition scheme of a singleton-partitioned, or non-partitioned service. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". 
+ :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme """ _validation = { @@ -20099,76 +22065,66 @@ class SingletonPartitionSchemeDescription(PartitionSchemeDescription): 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SingletonPartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'Singleton' + self.partition_scheme = 'Singleton' # type: str -class StartClusterUpgradeDescription(Model): +class StartClusterUpgradeDescription(msrest.serialization.Model): """Describes the parameters for starting a cluster upgrade. :param code_version: The cluster code version. :type code_version: str :param config_version: The cluster configuration version. :type config_version: str - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through - the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', - 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default - value: "Default" . + :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible + values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", + "ReverseLexicographical". Default value: "Default". 
:type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health - evaluation rather than absolute health evaluation after completion of each - upgrade domain. + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than + absolute health evaluation after completion of each upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to - evaluate the health of the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of + the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health - policy map used to evaluate the health of an application or one of its - children entities. 
- :type application_health_policy_map: - ~azure.servicefabric.models.ApplicationHealthPolicies - :param instance_close_delay_duration_in_seconds: Duration in seconds, to - wait before a stateless instance is closed, to allow the active requests - to drain gracefully. This would be effective when the instance is closing - during the application/cluster - upgrade, only for those instances which have a non-zero delay duration - configured in the service description. See - InstanceCloseDelayDurationSeconds property in $ref: + :param application_health_policy_map: Defines the application health policy map used to + evaluate the health of an application or one of its children entities. + :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies + :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a + stateless instance is closed, to allow the active requests to drain gracefully. This would be + effective when the instance is closing during the application/cluster + upgrade, only for those instances which have a non-zero delay duration configured in the + service description. See InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is - 4294967295, which indicates that the behavior will entirely depend on the - delay configured in the stateless service description. + Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates + that the behavior will entirely depend on the delay configured in the stateless service + description. 
:type instance_close_delay_duration_in_seconds: long """ @@ -20188,21 +22144,24 @@ class StartClusterUpgradeDescription(Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StartClusterUpgradeDescription, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) self.config_version = kwargs.get('config_version', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) - self.force_restart = kwargs.get('force_restart', None) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) + self.force_restart = kwargs.get('force_restart', False) self.sort_order = kwargs.get('sort_order', "Default") self.monitoring_policy = kwargs.get('monitoring_policy', None) self.cluster_health_policy = kwargs.get('cluster_health_policy', None) self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) self.cluster_upgrade_health_policy = kwargs.get('cluster_upgrade_health_policy', None) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) - self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', None) + self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', 4294967295) class StartedChaosEvent(ChaosEvent): @@ -20210,31 +22169,34 @@ class StartedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. 
Constant filled by server. - :type kind: str - :param chaos_parameters: Defines all the parameters to configure a Chaos - run. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param chaos_parameters: Defines all the parameters to configure a Chaos run. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StartedChaosEvent, self).__init__(**kwargs) + self.kind = 'Started' # type: str self.chaos_parameters = kwargs.get('chaos_parameters', None) - self.kind = 'Started' class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -20242,31 +22204,48 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -20282,17 +22261,16 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -20307,11 +22285,11 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -20325,18 +22303,21 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulReplicaHealthReportExpiredEvent, self).__init__(**kwargs) - self.replica_instance_id = kwargs.get('replica_instance_id', None) - self.source_id = 
kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'StatefulReplicaHealthReportExpired' + self.kind = 'StatefulReplicaHealthReportExpired' # type: str + self.replica_instance_id = kwargs['replica_instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class StatefulReplicaNewHealthReportEvent(ReplicaEvent): @@ -20344,31 +22325,48 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -20384,17 +22382,16 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -20409,11 +22406,11 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -20427,18 +22424,21 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulReplicaNewHealthReportEvent, self).__init__(**kwargs) - self.replica_instance_id = kwargs.get('replica_instance_id', None) - self.source_id = 
kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'StatefulReplicaNewHealthReport' + self.kind = 'StatefulReplicaNewHealthReport' # type: str + self.replica_instance_id = kwargs['replica_instance_id'] + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class StatefulServiceDescription(ServiceDescription): @@ -20446,111 +22446,105 @@ class StatefulServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. 
Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. - Initialization data is passed to service instances or replicas when they - are created. + :param initialization_data: The initialization data as an array of bytes. Initialization data + is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an - object. - :type partition_description: - ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an object. + :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. 
- :type service_load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost - property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param target_replica_set_size: Required. The target replica set size as a - number. 
+ :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param tags_required_to_place: Tags for placement of this service. + :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :param tags_required_to_run: Tags for running of this service. + :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription + :param target_replica_set_size: Required. The target replica set size as a number. :type target_replica_set_size: int - :param min_replica_set_size: Required. The minimum replica set size as a - number. + :param min_replica_set_size: Required. The minimum replica set size as a number. :type min_replica_set_size: int - :param has_persisted_state: Required. A flag indicating whether this is a - persistent service which stores states on the local disk. If it is then - the value of this property is true, if not it is false. + :param has_persisted_state: Required. A flag indicating whether this is a persistent service + which stores states on the local disk. If it is then the value of this property is true, if not + it is false. :type has_persisted_state: bool - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - QuorumLossWaitDuration (2) and StandByReplicaKeepDuration(4) are set. - - None - Does not indicate any other properties are set. The value is - zero. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 1. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 2. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 4. 
- - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 8. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 16. + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for QuorumLossWaitDuration (2) and + StandByReplicaKeepDuration(4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 1. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 2. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The + value is 4. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 8. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 16. :type flags: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, - between when a replica goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica + goes down and when a new replica is created. :type replica_restart_wait_duration_seconds: long - :param quorum_loss_wait_duration_seconds: The maximum duration, in - seconds, for which a partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a + partition is allowed to be in a state of quorum loss. 
:type quorum_loss_wait_duration_seconds: long - :param stand_by_replica_keep_duration_seconds: The definition on how long - StandBy replicas should be maintained before being removed. + :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas + should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: long - :param service_placement_time_limit_seconds: The duration for which - replicas can stay InBuild before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild + before reporting that build is stuck. :type service_placement_time_limit_seconds: long - :param drop_source_replica_on_move: Indicates whether to drop source - Secondary replica even if the target replica has not finished build. If - desired behavior is to drop it as soon as possible the value of this - property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if + the target replica has not finished build. If desired behavior is to drop it as soon as + possible the value of this property is true, if not it is false. :type drop_source_replica_on_move: bool + :param replica_lifecycle_description: Defines how replicas of this service will behave during + their lifecycle. 
+ :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { + 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, - 'service_kind': {'required': True}, 'target_replica_set_size': {'required': True, 'minimum': 1}, 'min_replica_set_size': {'required': True, 'minimum': 1}, 'has_persisted_state': {'required': True}, @@ -20561,6 +22555,7 @@ class StatefulServiceDescription(ServiceDescription): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -20575,7 +22570,8 @@ class StatefulServiceDescription(ServiceDescription): 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, + 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, @@ -20585,20 +22581,25 @@ class StatefulServiceDescription(ServiceDescription): 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'long'}, 'service_placement_time_limit_seconds': {'key': 'ServicePlacementTimeLimitSeconds', 'type': 'long'}, 'drop_source_replica_on_move': {'key': 'DropSourceReplicaOnMove', 'type': 'bool'}, + 'replica_lifecycle_description': {'key': 
'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceDescription, self).__init__(**kwargs) - self.target_replica_set_size = kwargs.get('target_replica_set_size', None) - self.min_replica_set_size = kwargs.get('min_replica_set_size', None) - self.has_persisted_state = kwargs.get('has_persisted_state', None) + self.service_kind = 'Stateful' # type: str + self.target_replica_set_size = kwargs['target_replica_set_size'] + self.min_replica_set_size = kwargs['min_replica_set_size'] + self.has_persisted_state = kwargs['has_persisted_state'] self.flags = kwargs.get('flags', None) self.replica_restart_wait_duration_seconds = kwargs.get('replica_restart_wait_duration_seconds', None) self.quorum_loss_wait_duration_seconds = kwargs.get('quorum_loss_wait_duration_seconds', None) self.stand_by_replica_keep_duration_seconds = kwargs.get('stand_by_replica_keep_duration_seconds', None) self.service_placement_time_limit_seconds = kwargs.get('service_placement_time_limit_seconds', None) self.drop_source_replica_on_move = kwargs.get('drop_source_replica_on_move', None) - self.service_kind = 'Stateful' + self.replica_lifecycle_description = kwargs.get('replica_lifecycle_description', None) class StatefulServiceInfo(ServiceInfo): @@ -20606,33 +22607,31 @@ class StatefulServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. 
This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service - manifest. + :param type_name: Name of the service type as specified in the service manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values - include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', - 'Failed' + :param service_status: The status of the application. Possible values include: "Unknown", + "Active", "Upgrading", "Deleting", "Creating", "Failed". :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Required. 
Constant filled by server. - :type service_kind: str :param has_persisted_state: Whether the service has persisted state. :type has_persisted_state: bool """ @@ -20643,20 +22642,23 @@ class StatefulServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceInfo, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.has_persisted_state = kwargs.get('has_persisted_state', None) - self.service_kind = 'Stateful' class StatefulServicePartitionInfo(ServicePartitionInfo): @@ -20664,35 +22666,31 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
:type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service - partition. Possible values include: 'Invalid', 'Ready', 'NotReady', - 'InQuorumLoss', 'Reconfiguring', 'Deleting' - :type partition_status: str or - ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, - partitioning scheme and keys supported by it. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :param partition_status: The status of the service fabric service partition. Possible values + include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". + :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, partitioning scheme and + keys supported by it. + :type partition_information: ~azure.servicefabric.models.PartitionInformation :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: long :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: long - :param last_quorum_loss_duration: The duration for which this partition - was in quorum loss. If the partition is currently in quorum loss, it - returns the duration since it has been in that state. This field is using - ISO8601 format for specifying the duration. - :type last_quorum_loss_duration: timedelta - :param primary_epoch: An Epoch is a configuration number for the partition - as a whole. When the configuration of the replica set changes, for example - when the Primary replica changes, the operations that are replicated from - the new Primary replica are said to be a new Epoch from the ones which - were sent by the old Primary replica. 
+ :param last_quorum_loss_duration: The duration for which this partition was in quorum loss. If + the partition is currently in quorum loss, it returns the duration since it has been in that + state. This field is using ISO8601 format for specifying the duration. + :type last_quorum_loss_duration: ~datetime.timedelta + :param primary_epoch: An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary replica changes, the + operations that are replicated from the new Primary replica are said to be a new Epoch from the + ones which were sent by the old Primary replica. :type primary_epoch: ~azure.servicefabric.models.Epoch """ @@ -20701,60 +22699,59 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'long'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'long'}, 'last_quorum_loss_duration': {'key': 'LastQuorumLossDuration', 'type': 'duration'}, 'primary_epoch': {'key': 'PrimaryEpoch', 'type': 'Epoch'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServicePartitionInfo, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.target_replica_set_size = kwargs.get('target_replica_set_size', None) self.min_replica_set_size = kwargs.get('min_replica_set_size', None) self.last_quorum_loss_duration = kwargs.get('last_quorum_loss_duration', None) self.primary_epoch = kwargs.get('primary_epoch', None) - self.service_kind = 'Stateful' class StatefulServiceReplicaHealth(ReplicaHealth): """Represents the 
health of the stateful service replica. - Contains the replica aggregated health state, the health events and the - unhealthy evaluations. +Contains the replica aggregated health state, the health events and the unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. 
:type replica_id: str """ @@ -20767,41 +22764,39 @@ class StatefulServiceReplicaHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceReplicaHealth, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) - self.service_kind = 'Stateful' class StatefulServiceReplicaHealthState(ReplicaHealthState): - """Represents the health state of the stateful service replica, which contains - the replica ID and the aggregated health state. + """Represents the health state of the stateful service replica, which contains the replica ID and the aggregated health state. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: The ID of the partition to which this replica - belongs. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_kind: Required. 
The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. 
:type replica_id: str """ @@ -20811,52 +22806,49 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceReplicaHealthState, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) - self.service_kind = 'Stateful' class StatefulServiceReplicaInfo(ReplicaInfo): - """Represents a stateful service replica. This includes information about the - identity, role, status, health, node name, uptime, and other details about - the replica. + """Represents a stateful service replica. This includes information about the identity, role, status, health, node name, uptime, and other details about the replica. All required parameters must be populated in order to send to Azure. - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of - the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_role: The role of a replica of a stateful service. Possible - values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', - 'ActiveSecondary' + :param replica_role: The role of a replica of a stateful service. Possible values include: + "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str """ @@ -20865,55 +22857,53 @@ class StatefulServiceReplicaInfo(ReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceReplicaInfo, self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.replica_role = kwargs.get('replica_role', None) self.replica_id = kwargs.get('replica_id', None) - self.service_kind = 'Stateful' class StatefulServiceTypeDescription(ServiceTypeDescription): - """Describes a stateful service type defined in the service manifest of a - provisioned application type. + """Describes a stateful service type defined in the service manifest of a provisioned application type. All required parameters must be populated in order to send to Azure. - :param is_stateful: Indicates whether the service type is a stateful - service type or a stateless service type. This property is true if the - service type is a stateful service type, false otherwise. + :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. + Possible values include: "Invalid", "Stateless", "Stateful". 
+ :type kind: str or ~azure.servicefabric.models.ServiceKind + :param is_stateful: Indicates whether the service type is a stateful service type or a + stateless service type. This property is true if the service type is a stateful service type, + false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when - instantiating this service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when instantiating this + service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy - descriptions. + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: - list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Required. Constant filled by server. - :type kind: str - :param has_persisted_state: A flag indicating whether this is a persistent - service which stores states on the local disk. If it is then the value of - this property is true, if not it is false. + :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param has_persisted_state: A flag indicating whether this is a persistent service which stores + states on the local disk. 
If it is then the value of this property is true, if not it is false. :type has_persisted_state: bool """ @@ -20922,20 +22912,23 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceTypeDescription, self).__init__(**kwargs) + self.kind = 'Stateful' # type: str self.has_persisted_state = kwargs.get('has_persisted_state', None) - self.kind = 'Stateful' class StatefulServiceUpdateDescription(ServiceUpdateDescription): @@ -20943,93 +22936,96 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. - - None - Does not indicate any other properties are set. The value is - zero. 
- - TargetReplicaSetSize/InstanceCount - Indicates whether the - TargetReplicaSetSize property (for Stateful services) or the InstanceCount - property (for Stateless services) is set. The value is 1. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 2. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 4. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 8. - - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The - value is 16. - - PlacementConstraints - Indicates the PlacementConstraints property is - set. The value is 32. - - PlacementPolicyList - Indicates the ServicePlacementPolicies property is - set. The value is 64. - - Correlation - Indicates the CorrelationScheme property is set. The value - is 128. - - Metrics - Indicates the ServiceLoadMetrics property is set. The value is - 256. - - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The - value is 512. - - ScalingPolicy - Indicates the ScalingPolicies property is set. The value - is 1024. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 2048. - - MinInstanceCount - Indicates the MinInstanceCount property is set. The - value is 4096. - - MinInstancePercentage - Indicates the MinInstancePercentage property is - set. The value is 8192. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 16384. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 32768. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param flags: Flags indicating whether other properties are set. 
Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and + QuorumLossWaitDuration (4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. + * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property + (for Stateful services) or the InstanceCount property (for Stateless services) is set. The + value is 1. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 2. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 4. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The + value is 8. + * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. + * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. + * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is + 64. + * Correlation - Indicates the CorrelationScheme property is set. The value is 128. + * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. + * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 2048. + * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. + * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is + 8192. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. 
The + value is 16384. + * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDuration property is set. The + value is 32768. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 65536. + * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. + * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. + * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service.
Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_dns_name: The DNS name of the service. + :type service_dns_name: str + :param tags_for_placement: Tags for placement of this service. + :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription + :param tags_for_running: Tags for running of this service. + :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: int :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, - between when a replica goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica + goes down and when a new replica is created. :type replica_restart_wait_duration_seconds: str - :param quorum_loss_wait_duration_seconds: The maximum duration, in - seconds, for which a partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a + partition is allowed to be in a state of quorum loss. :type quorum_loss_wait_duration_seconds: str - :param stand_by_replica_keep_duration_seconds: The definition on how long - StandBy replicas should be maintained before being removed. 
+ :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas + should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: str - :param service_placement_time_limit_seconds: The duration for which - replicas can stay InBuild before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild + before reporting that build is stuck. :type service_placement_time_limit_seconds: str - :param drop_source_replica_on_move: Indicates whether to drop source - Secondary replica even if the target replica has not finished build. If - desired behavior is to drop it as soon as possible the value of this - property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if + the target replica has not finished build. If desired behavior is to drop it as soon as + possible the value of this property is true, if not it is false. :type drop_source_replica_on_move: bool + :param replica_lifecycle_description: Defines how replicas of this service will behave during + their lifecycle. 
+ :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { @@ -21039,6 +23035,7 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -21046,7 +23043,9 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, + 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'str'}, @@ -21054,10 +23053,15 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'str'}, 'service_placement_time_limit_seconds': {'key': 'ServicePlacementTimeLimitSeconds', 'type': 'str'}, 'drop_source_replica_on_move': {'key': 'DropSourceReplicaOnMove', 'type': 'bool'}, + 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatefulServiceUpdateDescription, 
self).__init__(**kwargs) + self.service_kind = 'Stateful' # type: str self.target_replica_set_size = kwargs.get('target_replica_set_size', None) self.min_replica_set_size = kwargs.get('min_replica_set_size', None) self.replica_restart_wait_duration_seconds = kwargs.get('replica_restart_wait_duration_seconds', None) @@ -21065,7 +23069,7 @@ def __init__(self, **kwargs): self.stand_by_replica_keep_duration_seconds = kwargs.get('stand_by_replica_keep_duration_seconds', None) self.service_placement_time_limit_seconds = kwargs.get('service_placement_time_limit_seconds', None) self.drop_source_replica_on_move = kwargs.get('drop_source_replica_on_move', None) - self.service_kind = 'Stateful' + self.replica_lifecycle_description = kwargs.get('replica_lifecycle_description', None) class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -21073,31 +23077,48 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -21111,17 +23132,16 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -21135,11 +23155,11 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -21152,17 +23172,20 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessReplicaHealthReportExpiredEvent, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = 
kwargs.get('health_state', None) - self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'StatelessReplicaHealthReportExpired' + self.kind = 'StatelessReplicaHealthReportExpired' # type: str + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class StatelessReplicaNewHealthReportEvent(ReplicaEvent): @@ -21170,31 +23193,48 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -21208,17 +23248,16 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -21232,11 +23271,11 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -21249,17 +23288,20 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessReplicaNewHealthReportEvent, self).__init__(**kwargs) - self.source_id = kwargs.get('source_id', None) - self.property = kwargs.get('property', None) - self.health_state = kwargs.get('health_state', None) - 
self.time_to_live_ms = kwargs.get('time_to_live_ms', None) - self.sequence_number = kwargs.get('sequence_number', None) - self.description = kwargs.get('description', None) - self.remove_when_expired = kwargs.get('remove_when_expired', None) - self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) - self.kind = 'StatelessReplicaNewHealthReport' + self.kind = 'StatelessReplicaNewHealthReport' # type: str + self.source_id = kwargs['source_id'] + self.property = kwargs['property'] + self.health_state = kwargs['health_state'] + self.time_to_live_ms = kwargs['time_to_live_ms'] + self.sequence_number = kwargs['sequence_number'] + self.description = kwargs['description'] + self.remove_when_expired = kwargs['remove_when_expired'] + self.source_utc_timestamp = kwargs['source_utc_timestamp'] class StatelessServiceDescription(ServiceDescription): @@ -21267,121 +23309,128 @@ class StatelessServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. 
- Initialization data is passed to service instances or replicas when they - are created. + :param initialization_data: The initialization data as an array of bytes. Initialization data + is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an - object. - :type partition_description: - ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an object. + :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost - property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param tags_required_to_place: Tags for placement of this service. + :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :param tags_required_to_run: Tags for running of this service. 
+ :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription :param instance_count: Required. The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of - instances that must be up to meet the EnsureAvailability safety check - during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation - -1 is first converted into the number of nodes on which the instances are - allowed to be placed according to the placement constraints on the - service. + :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up + to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted + into the number of nodes on which the instances are allowed to be placed according to the + placement constraints on the service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum - percentage of InstanceCount that must be up to meet the EnsureAvailability - safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage - computation, -1 is first converted into the number of nodes on which the - instances are allowed to be placed according to the placement constraints - on the service. 
+ :param min_instance_percentage: MinInstancePercentage is the minimum percentage of + InstanceCount that must be up to meet the EnsureAvailability safety check during operations + like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first + converted into the number of nodes on which the instances are allowed to be placed according to + the placement constraints on the service. :type min_instance_percentage: int - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 1 then the flags for - InstanceCloseDelayDuration is set. - - None - Does not indicate any other properties are set. The value is - zero. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 1. + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 1 then the flags for InstanceCloseDelayDuration is set. + + + * None - Does not indicate any other properties are set. The value is zero. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 1. + * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDurationSeconds property is + set. The value is 2. 
:type flags: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait - before a stateless instance is closed, to allow the active requests to - drain gracefully. This would be effective when the instance is closing - during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the - delay, which prevents new connections to this instance. + :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless + instance is closed, to allow the active requests to drain gracefully. This would be effective + when the instance is closing during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the delay, which prevents + new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future - requests. - Note, the default value of InstanceCloseDelayDuration is 0, which - indicates that there won't be any delay or removal of the endpoint prior - to closing the instance. + + .. code-block:: + + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future requests. + + Note, the default value of InstanceCloseDelayDuration is 0, which indicates that there won't + be any delay or removal of the endpoint prior to closing the instance. 
:type instance_close_delay_duration_seconds: long + :param instance_lifecycle_description: Defines how instances of this service will behave during + their lifecycle. + :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer + starts. When it expires Service Fabric will create a new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in situations where the + instance going down is likely to recover in a short time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes down, Service Fabric + will immediately start building its replacement. + :type instance_restart_wait_duration_seconds: long """ _validation = { + 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, - 'service_kind': {'required': True}, 'instance_count': {'required': True, 'minimum': -1}, + 'min_instance_count': {'minimum': 1}, + 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, 'instance_close_delay_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, + 'instance_restart_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -21396,22 +23445,30 @@ class StatelessServiceDescription(ServiceDescription): 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 
'ServiceKind', 'type': 'str'}, + 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, + 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, 'flags': {'key': 'Flags', 'type': 'int'}, 'instance_close_delay_duration_seconds': {'key': 'InstanceCloseDelayDurationSeconds', 'type': 'long'}, + 'instance_lifecycle_description': {'key': 'InstanceLifecycleDescription', 'type': 'InstanceLifecycleDescription'}, + 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'long'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceDescription, self).__init__(**kwargs) - self.instance_count = kwargs.get('instance_count', None) - self.min_instance_count = kwargs.get('min_instance_count', None) - self.min_instance_percentage = kwargs.get('min_instance_percentage', None) + self.service_kind = 'Stateless' # type: str + self.instance_count = kwargs['instance_count'] + self.min_instance_count = kwargs.get('min_instance_count', 1) + self.min_instance_percentage = kwargs.get('min_instance_percentage', 0) self.flags = kwargs.get('flags', None) self.instance_close_delay_duration_seconds = kwargs.get('instance_close_delay_duration_seconds', None) - self.service_kind = 'Stateless' + self.instance_lifecycle_description = kwargs.get('instance_lifecycle_description', None) + self.instance_restart_wait_duration_seconds = kwargs.get('instance_restart_wait_duration_seconds', None) class StatelessServiceInfo(ServiceInfo): @@ -21419,33 +23476,31 @@ class StatelessServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. 
This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service - manifest. + :param type_name: Name of the service type as specified in the service manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. 
Possible values - include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', - 'Failed' + :param service_status: The status of the application. Possible values include: "Unknown", + "Active", "Upgrading", "Deleting", "Creating", "Failed". :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Required. Constant filled by server. - :type service_kind: str """ _validation = { @@ -21454,53 +23509,53 @@ class StatelessServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceInfo, self).__init__(**kwargs) - self.service_kind = 'Stateless' + self.service_kind = 'Stateless' # type: str class StatelessServiceInstanceHealth(ReplicaHealth): """Represents the health of the stateless service instance. - Contains the instance aggregated health state, the health events and the - unhealthy evaluations. +Contains the instance aggregated health state, the health events and the unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. 
InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. :type instance_id: str """ @@ -21513,36 +23568,36 @@ class StatelessServiceInstanceHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceInstanceHealth, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) - self.service_kind = 'Stateless' class StatelessServiceInstanceHealthState(ReplicaHealthState): - """Represents the health state of the stateless service instance, which - contains the instance ID and the aggregated health state. + """Represents the health state of the stateless service instance, which contains the instance ID and the aggregated health state. All required parameters must be populated in order to send to Azure. 
- :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: The ID of the partition to which this replica - belongs. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of the stateless service instance on the wire this - field is called ReplicaId. + :param replica_id: Id of the stateless service instance on the wire this field is called + ReplicaId. 
:type replica_id: str """ @@ -21552,46 +23607,45 @@ class StatelessServiceInstanceHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceInstanceHealthState, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.replica_id = kwargs.get('replica_id', None) - self.service_kind = 'Stateless' class StatelessServiceInstanceInfo(ReplicaInfo): - """Represents a stateless service instance. This includes information about - the identity, status, health, node name, uptime, and other details about - the instance. + """Represents a stateless service instance. This includes information about the identity, status, health, node name, uptime, and other details about the instance. All required parameters must be populated in order to send to Azure. - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of - the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. 
:type instance_id: str """ @@ -21600,19 +23654,22 @@ class StatelessServiceInstanceInfo(ReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceInstanceInfo, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) - self.service_kind = 'Stateless' class StatelessServicePartitionInfo(ServicePartitionInfo): @@ -21620,100 +23677,95 @@ class StatelessServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service - partition. 
Possible values include: 'Invalid', 'Ready', 'NotReady', - 'InQuorumLoss', 'Reconfiguring', 'Deleting' - :type partition_status: str or - ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, - partitioning scheme and keys supported by it. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :param partition_status: The status of the service fabric service partition. Possible values + include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". + :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, partitioning scheme and + keys supported by it. + :type partition_information: ~azure.servicefabric.models.PartitionInformation :param instance_count: Number of instances of this partition. :type instance_count: long - :param min_instance_count: MinInstanceCount is the minimum number of - instances that must be up to meet the EnsureAvailability safety check - during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation - -1 is first converted into the number of nodes on which the instances are - allowed to be placed according to the placement constraints on the - service. + :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up + to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). 
+ Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted + into the number of nodes on which the instances are allowed to be placed according to the + placement constraints on the service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum - percentage of InstanceCount that must be up to meet the EnsureAvailability - safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage - computation, -1 is first converted into the number of nodes on which the - instances are allowed to be placed according to the placement constraints - on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum percentage of + InstanceCount that must be up to meet the EnsureAvailability safety check during operations + like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first + converted into the number of nodes on which the instances are allowed to be placed according to + the placement constraints on the service. 
:type min_instance_percentage: int """ _validation = { 'service_kind': {'required': True}, + 'min_instance_count': {'minimum': 1}, + 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'long'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServicePartitionInfo, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.instance_count = kwargs.get('instance_count', None) - self.min_instance_count = kwargs.get('min_instance_count', None) - self.min_instance_percentage = kwargs.get('min_instance_percentage', None) - self.service_kind = 'Stateless' + self.min_instance_count = kwargs.get('min_instance_count', 1) + self.min_instance_percentage = kwargs.get('min_instance_percentage', 0) class StatelessServiceTypeDescription(ServiceTypeDescription): - """Describes a stateless service type defined in the service manifest of a - provisioned application type. + """Describes a stateless service type defined in the service manifest of a provisioned application type. All required parameters must be populated in order to send to Azure. - :param is_stateful: Indicates whether the service type is a stateful - service type or a stateless service type. This property is true if the - service type is a stateful service type, false otherwise. + :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. 
+ Possible values include: "Invalid", "Stateless", "Stateful". + :type kind: str or ~azure.servicefabric.models.ServiceKind + :param is_stateful: Indicates whether the service type is a stateful service type or a + stateless service type. This property is true if the service type is a stateful service type, + false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when - instantiating this service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when instantiating this + service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy - descriptions. + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: - list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Required. Constant filled by server. - :type kind: str - :param use_implicit_host: A flag indicating if this type is not - implemented and hosted by a user service process, but is implicitly hosted - by a system created process. This value is true for services using the - guest executable services, false otherwise. 
+ :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param use_implicit_host: A flag indicating if this type is not implemented and hosted by a + user service process, but is implicitly hosted by a system created process. This value is true + for services using the guest executable services, false otherwise. :type use_implicit_host: bool """ @@ -21722,20 +23774,23 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'use_implicit_host': {'key': 'UseImplicitHost', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceTypeDescription, self).__init__(**kwargs) + self.kind = 'Stateless' # type: str self.use_implicit_host = kwargs.get('use_implicit_host', None) - self.kind = 'Stateless' class StatelessServiceUpdateDescription(ServiceUpdateDescription): @@ -21743,117 +23798,129 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. 
- For example, if the provided value is 6 then the flags for - ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. - - None - Does not indicate any other properties are set. The value is - zero. - - TargetReplicaSetSize/InstanceCount - Indicates whether the - TargetReplicaSetSize property (for Stateful services) or the InstanceCount - property (for Stateless services) is set. The value is 1. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 2. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 4. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 8. - - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The - value is 16. - - PlacementConstraints - Indicates the PlacementConstraints property is - set. The value is 32. - - PlacementPolicyList - Indicates the ServicePlacementPolicies property is - set. The value is 64. - - Correlation - Indicates the CorrelationScheme property is set. The value - is 128. - - Metrics - Indicates the ServiceLoadMetrics property is set. The value is - 256. - - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The - value is 512. - - ScalingPolicy - Indicates the ScalingPolicies property is set. The value - is 1024. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 2048. - - MinInstanceCount - Indicates the MinInstanceCount property is set. The - value is 4096. - - MinInstancePercentage - Indicates the MinInstancePercentage property is - set. The value is 8192. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 16384. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 32768. + :param service_kind: Required. The service kind.Constant filled by server. 
Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and + QuorumLossWaitDuration (4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. + * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property + (for Stateful services) or the InstanceCount property (for Stateless services) is set. The + value is 1. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 2. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 4. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The + value is 8. + * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. + * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. + * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is + 64. + * Correlation - Indicates the CorrelationScheme property is set. The value is 128. + * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. + * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 2048. + * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. 
+ * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is + 8192. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 16384. + * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 32768. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 65536. + * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. + * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. + * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_dns_name: The DNS name of the service. + :type service_dns_name: str + :param tags_for_placement: Tags for placement of this service. + :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription + :param tags_for_running: Tags for running of this service. + :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription :param instance_count: The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of - instances that must be up to meet the EnsureAvailability safety check - during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation - -1 is first converted into the number of nodes on which the instances are - allowed to be placed according to the placement constraints on the - service. + :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up + to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. 
+ The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted + into the number of nodes on which the instances are allowed to be placed according to the + placement constraints on the service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum - percentage of InstanceCount that must be up to meet the EnsureAvailability - safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage - computation, -1 is first converted into the number of nodes on which the - instances are allowed to be placed according to the placement constraints - on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum percentage of + InstanceCount that must be up to meet the EnsureAvailability safety check during operations + like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first + converted into the number of nodes on which the instances are allowed to be placed according to + the placement constraints on the service. :type min_instance_percentage: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait - before a stateless instance is closed, to allow the active requests to - drain gracefully. This would be effective when the instance is closing - during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the - delay, which prevents new connections to this instance. 
+ :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless + instance is closed, to allow the active requests to drain gracefully. This would be effective + when the instance is closing during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the delay, which prevents + new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future - requests. + + .. code-block:: + + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future requests. :type instance_close_delay_duration_seconds: str + :param instance_lifecycle_description: Defines how instances of this service will behave during + their lifecycle. + :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer + starts. When it expires Service Fabric will create a new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in situations where the + instance going down is likely to recover in a short time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes down, Service Fabric + will immediately start building its replacement. 
+ :type instance_restart_wait_duration_seconds: str """ _validation = { 'service_kind': {'required': True}, 'instance_count': {'minimum': -1}, + 'min_instance_count': {'minimum': 1}, + 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -21861,53 +23928,65 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, + 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, 'instance_close_delay_duration_seconds': {'key': 'InstanceCloseDelayDurationSeconds', 'type': 'str'}, + 'instance_lifecycle_description': {'key': 'InstanceLifecycleDescription', 'type': 'InstanceLifecycleDescription'}, + 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StatelessServiceUpdateDescription, self).__init__(**kwargs) + self.service_kind = 'Stateless' # type: str self.instance_count = kwargs.get('instance_count', None) - self.min_instance_count = 
kwargs.get('min_instance_count', None) - self.min_instance_percentage = kwargs.get('min_instance_percentage', None) + self.min_instance_count = kwargs.get('min_instance_count', 1) + self.min_instance_percentage = kwargs.get('min_instance_percentage', 0) self.instance_close_delay_duration_seconds = kwargs.get('instance_close_delay_duration_seconds', None) - self.service_kind = 'Stateless' + self.instance_lifecycle_description = kwargs.get('instance_lifecycle_description', None) + self.instance_restart_wait_duration_seconds = kwargs.get('instance_restart_wait_duration_seconds', None) class StoppedChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos stops because either - the user issued a stop or the time to run was up. + """Describes a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why Chaos stopped. Chaos can stop because of - StopChaos API call or the timeToRun provided in ChaosParameters is over. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why Chaos stopped. Chaos can stop because of StopChaos API call or the + timeToRun provided in ChaosParameters is over. 
:type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StoppedChaosEvent, self).__init__(**kwargs) + self.kind = 'Stopped' # type: str self.reason = kwargs.get('reason', None) - self.kind = 'Stopped' class StringPropertyValue(PropertyValue): @@ -21915,8 +23994,10 @@ class StringPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. :type data: str """ @@ -21931,24 +24012,28 @@ class StringPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StringPropertyValue, self).__init__(**kwargs) - self.data = kwargs.get('data', None) - self.kind = 'String' + self.kind = 'String' # type: str + self.data = kwargs['data'] class SuccessfulPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch succeeding. - Contains the results of any "Get" operations in the batch. + """Derived from PropertyBatchInfo. Represents the property batch succeeding. Contains the results of any "Get" operations in the batch. All required parameters must be populated in order to send to Azure. 
- :param kind: Required. Constant filled by server. - :type kind: str - :param properties: A map containing the properties that were requested - through any "Get" property batch operations. The key represents the index - of the "Get" operation in the original request, in string form. The value - is the property. If a property is not found, it will not be in the map. + :param kind: Required. The kind of property batch info, determined by the results of a property + batch. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Successful", "Failed". + :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind + :param properties: A map containing the properties that were requested through any "Get" + property batch operations. The key represents the index of the "Get" operation in the original + request, in string form. The value is the property. If a property is not found, it will not be + in the map. :type properties: dict[str, ~azure.servicefabric.models.PropertyInfo] """ @@ -21961,37 +24046,41 @@ class SuccessfulPropertyBatchInfo(PropertyBatchInfo): 'properties': {'key': 'Properties', 'type': '{PropertyInfo}'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SuccessfulPropertyBatchInfo, self).__init__(**kwargs) + self.kind = 'Successful' # type: str self.properties = kwargs.get('properties', None) - self.kind = 'Successful' class SystemApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the fabric:/System application, containing - information about the data and the algorithm used by health store to - evaluate health. The evaluation is returned only when the aggregated health - state of the cluster is either Error or Warning. - - All required parameters must be populated in order to send to Azure. 
- - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for the fabric:/System application, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state of the cluster is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the system application. The types - of the unhealthy evaluations can be DeployedApplicationsHealthEvaluation, - ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the system application. The types of the unhealthy evaluations can be + DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -21999,30 +24088,32 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SystemApplicationHealthEvaluation, self).__init__(**kwargs) + self.kind = 'SystemApplication' # type: str self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'SystemApplication' -class TcpConfig(Model): +class TcpConfig(msrest.serialization.Model): """Describes the tcp configuration for external connectivity for this network. All required parameters must be populated in order to send to Azure. :param name: Required. 
tcp gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint - below needs to be exposed. + :param port: Required. Specifies the port at which the service endpoint below needs to be + exposed. :type port: int - :param destination: Required. Describes destination endpoint for routing - traffic. + :param destination: Required. Describes destination endpoint for routing traffic. :type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -22038,49 +24129,52 @@ class TcpConfig(Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(TcpConfig, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.port = kwargs.get('port', None) - self.destination = kwargs.get('destination', None) + self.name = kwargs['name'] + self.port = kwargs['port'] + self.destination = kwargs['destination'] class TestErrorChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when an unexpected event occurs - in the Chaos engine. - For example, due to the cluster snapshot being inconsistent, while faulting - an entity, Chaos found that the entity was already faulted -- which would - be an unexpected event. - - All required parameters must be populated in order to send to Azure. - - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why TestErrorChaosEvent was generated. For - example, Chaos tries to fault a partition but finds that the partition is - no longer fault tolerant, then a TestErrorEvent gets generated with the - reason stating that the partition is not fault tolerant. + """Describes a Chaos event that gets generated when an unexpected event occurs in the Chaos engine. 
+For example, due to the cluster snapshot being inconsistent, while faulting an entity, Chaos found that the entity was already faulted -- which would be an unexpected event. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why TestErrorChaosEvent was generated. For example, Chaos tries to + fault a partition but finds that the partition is no longer fault tolerant, then a + TestErrorEvent gets generated with the reason stating that the partition is not fault tolerant. :type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(TestErrorChaosEvent, self).__init__(**kwargs) + self.kind = 'TestError' # type: str self.reason = kwargs.get('reason', None) - self.kind = 'TestError' class TimeBasedBackupScheduleDescription(BackupScheduleDescription): @@ -22088,21 +24182,20 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. Constant filled by server. - :type schedule_kind: str - :param schedule_frequency_type: Required. Describes the frequency with - which to run the time based backup schedule. 
Possible values include: - 'Invalid', 'Daily', 'Weekly' - :type schedule_frequency_type: str or - ~azure.servicefabric.models.BackupScheduleFrequencyType - :param run_days: List of days of a week when to trigger the periodic - backup. This is valid only when the backup schedule frequency type is - weekly. + :param schedule_kind: Required. The kind of backup schedule, time based or frequency + based.Constant filled by server. Possible values include: "Invalid", "TimeBased", + "FrequencyBased". + :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind + :param schedule_frequency_type: Required. Describes the frequency with which to run the time + based backup schedule. Possible values include: "Invalid", "Daily", "Weekly". + :type schedule_frequency_type: str or ~azure.servicefabric.models.BackupScheduleFrequencyType + :param run_days: List of days of a week when to trigger the periodic backup. This is valid only + when the backup schedule frequency type is weekly. :type run_days: list[str or ~azure.servicefabric.models.DayOfWeek] - :param run_times: Required. Represents the list of exact time during the - day in ISO8601 format. Like '19:00:00' will represent '7PM' during the - day. Date specified along with time will be ignored. - :type run_times: list[datetime] + :param run_times: Required. Represents the list of exact time during the day in ISO8601 format. + Like '19:00:00' will represent '7PM' during the day. Date specified along with time will be + ignored. 
+ :type run_times: list[~datetime.datetime] """ _validation = { @@ -22118,22 +24211,23 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): 'run_times': {'key': 'RunTimes', 'type': '[iso-8601]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(TimeBasedBackupScheduleDescription, self).__init__(**kwargs) - self.schedule_frequency_type = kwargs.get('schedule_frequency_type', None) + self.schedule_kind = 'TimeBased' # type: str + self.schedule_frequency_type = kwargs['schedule_frequency_type'] self.run_days = kwargs.get('run_days', None) - self.run_times = kwargs.get('run_times', None) - self.schedule_kind = 'TimeBased' + self.run_times = kwargs['run_times'] -class TimeOfDay(Model): +class TimeOfDay(msrest.serialization.Model): """Defines an hour and minute of the day specified in 24 hour time. - :param hour: Represents the hour of the day. Value must be between 0 and - 23 inclusive. + :param hour: Represents the hour of the day. Value must be between 0 and 23 inclusive. :type hour: int - :param minute: Represents the minute of the hour. Value must be between 0 - to 59 inclusive. + :param minute: Represents the minute of the hour. Value must be between 0 to 59 inclusive. :type minute: int """ @@ -22147,20 +24241,21 @@ class TimeOfDay(Model): 'minute': {'key': 'Minute', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(TimeOfDay, self).__init__(**kwargs) self.hour = kwargs.get('hour', None) self.minute = kwargs.get('minute', None) -class TimeRange(Model): +class TimeRange(msrest.serialization.Model): """Defines a time range in a 24 hour day specified by a start and end time. - :param start_time: Defines an hour and minute of the day specified in 24 - hour time. + :param start_time: Defines an hour and minute of the day specified in 24 hour time. 
:type start_time: ~azure.servicefabric.models.TimeOfDay - :param end_time: Defines an hour and minute of the day specified in 24 - hour time. + :param end_time: Defines an hour and minute of the day specified in 24 hour time. :type end_time: ~azure.servicefabric.models.TimeOfDay """ @@ -22169,28 +24264,29 @@ class TimeRange(Model): 'end_time': {'key': 'EndTime', 'type': 'TimeOfDay'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(TimeRange, self).__init__(**kwargs) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): - """Describes a partitioning scheme where an integer range is allocated evenly - across a number of partitions. + """Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". + :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme :param count: Required. The number of partitions. :type count: int - :param low_key: Required. String indicating the lower bound of the - partition key range that + :param low_key: Required. String indicating the lower bound of the partition key range that should be split between the partitions. :type low_key: str - :param high_key: Required. String indicating the upper bound of the - partition key range that + :param high_key: Required. String indicating the upper bound of the partition key range that should be split between the partitions. 
:type high_key: str """ @@ -22209,23 +24305,25 @@ class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs) - self.count = kwargs.get('count', None) - self.low_key = kwargs.get('low_key', None) - self.high_key = kwargs.get('high_key', None) - self.partition_scheme = 'UniformInt64Range' + self.partition_scheme = 'UniformInt64Range' # type: str + self.count = kwargs['count'] + self.low_key = kwargs['low_key'] + self.high_key = kwargs['high_key'] -class UnplacedReplicaInformation(Model): +class UnplacedReplicaInformation(msrest.serialization.Model): """Contains information for an unplaced replica. :param service_name: The name of the service. :type service_name: str :param partition_id: The ID of the partition. :type partition_id: str - :param unplaced_replica_details: List of reasons due to which a replica - cannot be placed. + :param unplaced_replica_details: List of reasons due to which a replica cannot be placed. :type unplaced_replica_details: list[str] """ @@ -22235,27 +24333,28 @@ class UnplacedReplicaInformation(Model): 'unplaced_replica_details': {'key': 'UnplacedReplicaDetails', 'type': '[str]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UnplacedReplicaInformation, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) self.unplaced_replica_details = kwargs.get('unplaced_replica_details', None) -class UnprovisionApplicationTypeDescriptionInfo(Model): - """Describes the operation to unregister or unprovision an application type - and its version that was registered with the Service Fabric. 
+class UnprovisionApplicationTypeDescriptionInfo(msrest.serialization.Model): + """Describes the operation to unregister or unprovision an application type and its version that was registered with the Service Fabric. All required parameters must be populated in order to send to Azure. - :param application_type_version: Required. The version of the application - type as defined in the application manifest. + :param application_type_version: Required. The version of the application type as defined in + the application manifest. :type application_type_version: str - :param async_property: The flag indicating whether or not unprovision - should occur asynchronously. When set to true, the unprovision operation - returns when the request is accepted by the system, and the unprovision - operation continues without any timeout limit. The default value is false. - However, we recommend setting it to true for large application packages + :param async_property: The flag indicating whether or not unprovision should occur + asynchronously. When set to true, the unprovision operation returns when the request is + accepted by the system, and the unprovision operation continues without any timeout limit. The + default value is false. However, we recommend setting it to true for large application packages that were provisioned. 
:type async_property: bool """ @@ -22269,13 +24368,16 @@ class UnprovisionApplicationTypeDescriptionInfo(Model): 'async_property': {'key': 'Async', 'type': 'bool'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UnprovisionApplicationTypeDescriptionInfo, self).__init__(**kwargs) - self.application_type_version = kwargs.get('application_type_version', None) + self.application_type_version = kwargs['application_type_version'] self.async_property = kwargs.get('async_property', None) -class UnprovisionFabricDescription(Model): +class UnprovisionFabricDescription(msrest.serialization.Model): """Describes the parameters for unprovisioning a cluster. :param code_version: The cluster code package version. @@ -22289,40 +24391,37 @@ class UnprovisionFabricDescription(Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UnprovisionFabricDescription, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) self.config_version = kwargs.get('config_version', None) -class UpdateClusterUpgradeDescription(Model): +class UpdateClusterUpgradeDescription(msrest.serialization.Model): """Parameters for updating a cluster upgrade. - :param upgrade_kind: The type of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling', - 'Rolling_ForceRestart'. Default value: "Rolling" . + :param upgrade_kind: The type of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling", "Rolling_ForceRestart". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeType - :param update_description: Describes the parameters for updating a rolling - upgrade of application or cluster. 
- :type update_description: - ~azure.servicefabric.models.RollingUpgradeUpdateDescription - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health - evaluation rather than absolute health evaluation after completion of each - upgrade domain. + :param update_description: Describes the parameters for updating a rolling upgrade of + application or cluster. + :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than + absolute health evaluation after completion of each upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to - evaluate the health of the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of + the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health - policy map used to evaluate the health of an application or one of its - children entities. - :type application_health_policy_map: - ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policy_map: Defines the application health policy map used to + evaluate the health of an application or one of its children entities. 
+ :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -22334,7 +24433,10 @@ class UpdateClusterUpgradeDescription(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpdateClusterUpgradeDescription, self).__init__(**kwargs) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.update_description = kwargs.get('update_description', None) @@ -22344,15 +24446,13 @@ def __init__(self, **kwargs): self.application_health_policy_map = kwargs.get('application_health_policy_map', None) -class UpdatePartitionLoadResult(Model): - """Specifies result of updating load for specified partitions. The output will - be ordered based on the partition ID. +class UpdatePartitionLoadResult(msrest.serialization.Model): + """Specifies result of updating load for specified partitions. The output will be ordered based on the partition ID. :param partition_id: Id of the partition. :type partition_id: str - :param partition_error_code: If OperationState is Completed - this is 0. - If OperationState is Faulted - this is an error code indicating the - reason. + :param partition_error_code: If OperationState is Completed - this is 0. If OperationState is + Faulted - this is an error code indicating the reason. 
:type partition_error_code: int """ @@ -22361,53 +24461,55 @@ class UpdatePartitionLoadResult(Model): 'partition_error_code': {'key': 'PartitionErrorCode', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpdatePartitionLoadResult, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.partition_error_code = kwargs.get('partition_error_code', None) class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta unhealthy cluster nodes in an - upgrade domain, containing health evaluations for each unhealthy node that - impacted current aggregated health state. - Can be returned during cluster upgrade when cluster aggregated health state - is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for delta unhealthy cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. +Can be returned during cluster upgrade when cluster aggregated health state is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health - is currently evaluated. + :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently + evaluated. :type upgrade_domain_name: str - :param baseline_error_count: Number of upgrade domain nodes with - aggregated heath state Error in the health store at the beginning of the - cluster upgrade. + :param baseline_error_count: Number of upgrade domain nodes with aggregated heath state Error + in the health store at the beginning of the cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of upgrade domain nodes in the - health store at the beginning of the cluster upgrade. + :param baseline_total_count: Total number of upgrade domain nodes in the health store at the + beginning of the cluster upgrade. 
:type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of - upgrade domain delta unhealthy nodes from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of upgrade domain delta + unhealthy nodes from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int - :param total_count: Total number of upgrade domain nodes in the health - store. + :param total_count: Total number of upgrade domain nodes in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -22415,9 +24517,9 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, @@ -22426,24 +24528,27 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpgradeDomainDeltaNodesCheckHealthEvaluation, self).__init__(**kwargs) + self.kind = 'UpgradeDomainDeltaNodesCheck' # type: str self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) self.baseline_error_count = kwargs.get('baseline_error_count', None) self.baseline_total_count = kwargs.get('baseline_total_count', None) self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'UpgradeDomainDeltaNodesCheck' -class UpgradeDomainInfo(Model): +class UpgradeDomainInfo(msrest.serialization.Model): """Information about an upgrade domain. - :param name: The name of the upgrade domain + :param name: The name of the upgrade domain. :type name: str - :param state: The state of the upgrade domain. Possible values include: - 'Invalid', 'Pending', 'InProgress', 'Completed' + :param state: The state of the upgrade domain. Possible values include: "Invalid", "Pending", + "InProgress", "Completed". 
:type state: str or ~azure.servicefabric.models.UpgradeDomainState """ @@ -22452,45 +24557,48 @@ class UpgradeDomainInfo(Model): 'state': {'key': 'State', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpgradeDomainInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.state = kwargs.get('state', None) class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for cluster nodes in an upgrade domain, - containing health evaluations for each unhealthy node that impacted current - aggregated health state. Can be returned when evaluating cluster health - during cluster upgrade and the aggregated health state is either Error or - Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health during cluster upgrade and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health - is currently evaluated. + :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently + evaluated. :type upgrade_domain_name: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of - unhealthy nodes from the ClusterHealthPolicy. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the + ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes in the current upgrade domain. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - NodeHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -22498,29 +24606,31 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpgradeDomainNodesHealthEvaluation, self).__init__(**kwargs) + self.kind = 'UpgradeDomainNodes' # type: str self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) - self.kind = 'UpgradeDomainNodes' -class UpgradeOrchestrationServiceState(Model): +class UpgradeOrchestrationServiceState(msrest.serialization.Model): """Service state of Service Fabric Upgrade Orchestration Service. - :param service_state: The state of Service Fabric Upgrade Orchestration - Service. + :param service_state: The state of Service Fabric Upgrade Orchestration Service. 
:type service_state: str """ @@ -22528,26 +24638,26 @@ class UpgradeOrchestrationServiceState(Model): 'service_state': {'key': 'ServiceState', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpgradeOrchestrationServiceState, self).__init__(**kwargs) self.service_state = kwargs.get('service_state', None) -class UpgradeOrchestrationServiceStateSummary(Model): +class UpgradeOrchestrationServiceStateSummary(msrest.serialization.Model): """Service state summary of Service Fabric Upgrade Orchestration Service. :param current_code_version: The current code version of the cluster. :type current_code_version: str - :param current_manifest_version: The current manifest version of the - cluster. + :param current_manifest_version: The current manifest version of the cluster. :type current_manifest_version: str :param target_code_version: The target code version of the cluster. :type target_code_version: str - :param target_manifest_version: The target manifest version of the - cluster. + :param target_manifest_version: The target manifest version of the cluster. :type target_manifest_version: str - :param pending_upgrade_type: The type of the pending upgrade of the - cluster. + :param pending_upgrade_type: The type of the pending upgrade of the cluster. 
:type pending_upgrade_type: str """ @@ -22559,7 +24669,10 @@ class UpgradeOrchestrationServiceStateSummary(Model): 'pending_upgrade_type': {'key': 'PendingUpgradeType', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UpgradeOrchestrationServiceStateSummary, self).__init__(**kwargs) self.current_code_version = kwargs.get('current_code_version', None) self.current_manifest_version = kwargs.get('current_manifest_version', None) @@ -22568,14 +24681,14 @@ def __init__(self, **kwargs): self.pending_upgrade_type = kwargs.get('pending_upgrade_type', None) -class UploadChunkRange(Model): +class UploadChunkRange(msrest.serialization.Model): """Information about which portion of the file to upload. - :param start_position: The start position of the portion of the file. It's - represented by the number of bytes. + :param start_position: The start position of the portion of the file. It's represented by the + number of bytes. :type start_position: str - :param end_position: The end position of the portion of the file. It's - represented by the number of bytes. + :param end_position: The end position of the portion of the file. It's represented by the + number of bytes. :type end_position: str """ @@ -22584,19 +24697,21 @@ class UploadChunkRange(Model): 'end_position': {'key': 'EndPosition', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UploadChunkRange, self).__init__(**kwargs) self.start_position = kwargs.get('start_position', None) self.end_position = kwargs.get('end_position', None) -class UploadSession(Model): +class UploadSession(msrest.serialization.Model): """Information about a image store upload session. - :param upload_sessions: When querying upload session by upload session ID, - the result contains only one upload session. When querying upload session - by image store relative path, the result might contain multiple upload - sessions. 
+ :param upload_sessions: When querying upload session by upload session ID, the result contains + only one upload session. When querying upload session by image store relative path, the result + might contain multiple upload sessions. :type upload_sessions: list[~azure.servicefabric.models.UploadSessionInfo] """ @@ -22604,28 +24719,28 @@ class UploadSession(Model): 'upload_sessions': {'key': 'UploadSessions', 'type': '[UploadSessionInfo]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UploadSession, self).__init__(**kwargs) self.upload_sessions = kwargs.get('upload_sessions', None) -class UploadSessionInfo(Model): - """Information about an image store upload session. A session is associated - with a relative path in the image store. +class UploadSessionInfo(msrest.serialization.Model): + """Information about an image store upload session. A session is associated with a relative path in the image store. - :param store_relative_path: The remote location within image store. This - path is relative to the image store root. + :param store_relative_path: The remote location within image store. This path is relative to + the image store root. :type store_relative_path: str - :param session_id: A unique ID of the upload session. A session ID can be - reused only if the session was committed or removed. + :param session_id: A unique ID of the upload session. A session ID can be reused only if the + session was committed or removed. :type session_id: str - :param modified_date: The date and time when the upload session was last - modified. - :type modified_date: datetime + :param modified_date: The date and time when the upload session was last modified. + :type modified_date: ~datetime.datetime :param file_size: The size in bytes of the uploading file. :type file_size: str - :param expected_ranges: List of chunk ranges that image store has not - received yet. 
+ :param expected_ranges: List of chunk ranges that image store has not received yet. :type expected_ranges: list[~azure.servicefabric.models.UploadChunkRange] """ @@ -22637,7 +24752,10 @@ class UploadSessionInfo(Model): 'expected_ranges': {'key': 'ExpectedRanges', 'type': '[UploadChunkRange]'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UploadSessionInfo, self).__init__(**kwargs) self.store_relative_path = kwargs.get('store_relative_path', None) self.session_id = kwargs.get('session_id', None) @@ -22646,13 +24764,12 @@ def __init__(self, **kwargs): self.expected_ranges = kwargs.get('expected_ranges', None) -class UsageInfo(Model): - """Information about how much space and how many files in the file system the - ImageStore is using in this category. +class UsageInfo(msrest.serialization.Model): + """Information about how much space and how many files in the file system the ImageStore is using in this category. - :param used_space: the size of all files in this category + :param used_space: the size of all files in this category. :type used_space: str - :param file_count: the number of all files in this category + :param file_count: the number of all files in this category. :type file_count: str """ @@ -22661,7 +24778,10 @@ class UsageInfo(Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(UsageInfo, self).__init__(**kwargs) self.used_space = kwargs.get('used_space', None) self.file_count = kwargs.get('file_count', None) @@ -22672,48 +24792,50 @@ class ValidationFailedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why the ValidationFailedChaosEvent was generated. 
- This may happen because more than MaxPercentUnhealthyNodes are unhealthy - for more than MaxClusterStabilizationTimeout. This reason will be in the - Reason property of the ValidationFailedChaosEvent as a string. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why the ValidationFailedChaosEvent was generated. This may happen + because more than MaxPercentUnhealthyNodes are unhealthy for more than + MaxClusterStabilizationTimeout. This reason will be in the Reason property of the + ValidationFailedChaosEvent as a string. :type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(ValidationFailedChaosEvent, self).__init__(**kwargs) + self.kind = 'ValidationFailed' # type: str self.reason = kwargs.get('reason', None) - self.kind = 'ValidationFailed' -class VolumeProviderParametersAzureFile(Model): +class VolumeProviderParametersAzureFile(msrest.serialization.Model): """This type describes a volume provided by an Azure Files file share. All required parameters must be populated in order to send to Azure. - :param account_name: Required. Name of the Azure storage account for the - File Share. + :param account_name: Required. Name of the Azure storage account for the File Share. 
:type account_name: str - :param account_key: Access key of the Azure storage account for the File - Share. + :param account_key: Access key of the Azure storage account for the File Share. :type account_key: str - :param share_name: Required. Name of the Azure Files file share that - provides storage for the volume. + :param share_name: Required. Name of the Azure Files file share that provides storage for the + volume. :type share_name: str """ @@ -22728,18 +24850,20 @@ class VolumeProviderParametersAzureFile(Model): 'share_name': {'key': 'shareName', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(VolumeProviderParametersAzureFile, self).__init__(**kwargs) - self.account_name = kwargs.get('account_name', None) + self.account_name = kwargs['account_name'] self.account_key = kwargs.get('account_key', None) - self.share_name = kwargs.get('share_name', None) + self.share_name = kwargs['share_name'] -class VolumeResourceDescription(Model): +class VolumeResourceDescription(msrest.serialization.Model): """This type describes a volume resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. @@ -22747,26 +24871,23 @@ class VolumeResourceDescription(Model): :type name: str :param description: User readable description of the volume. :type description: str - :ivar status: Status of the volume. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the volume. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the volume. 
+ :ivar status_details: Gives additional information about the current status of the volume. :vartype status_details: str - :ivar provider: Required. Provider of the volume. Default value: - "SFAzureFile" . - :vartype provider: str - :param azure_file_parameters: This type describes a volume provided by an - Azure Files file share. - :type azure_file_parameters: - ~azure.servicefabric.models.VolumeProviderParametersAzureFile + :param provider: Required. Provider of the volume. Possible values include: "SFAzureFile". + :type provider: str or ~azure.servicefabric.models.VolumeProvider + :param azure_file_parameters: This type describes a volume provided by an Azure Files file + share. + :type azure_file_parameters: ~azure.servicefabric.models.VolumeProviderParametersAzureFile """ _validation = { 'name': {'required': True}, 'status': {'readonly': True}, 'status_details': {'readonly': True}, - 'provider': {'required': True, 'constant': True}, + 'provider': {'required': True}, } _attribute_map = { @@ -22778,29 +24899,31 @@ class VolumeResourceDescription(Model): 'azure_file_parameters': {'key': 'properties.azureFileParameters', 'type': 'VolumeProviderParametersAzureFile'}, } - provider = "SFAzureFile" - - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(VolumeResourceDescription, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.description = kwargs.get('description', None) self.status = None self.status_details = None + self.provider = kwargs['provider'] self.azure_file_parameters = kwargs.get('azure_file_parameters', None) class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the replica build operation to finish. This - indicates that there is a replica that is going through the copy or is - providing data for building another replica. Bring the node down will abort - this copy operation which are typically expensive involving data movements. 
+ """Safety check that waits for the replica build operation to finish. This indicates that there is a replica that is going through the copy or is providing data for building another replica. Bring the node down will abort this copy operation which are typically expensive involving data movements. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22813,21 +24936,26 @@ class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(WaitForInbuildReplicaSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForInbuildReplica' + self.kind = 'WaitForInbuildReplica' # type: str class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica that was moved out of the - node due to upgrade to be placed back again on that node. + """Safety check that waits for the primary replica that was moved out of the node due to upgrade to be placed back again on that node. All required parameters must be populated in order to send to Azure. - :param kind: Required. 
Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22840,22 +24968,26 @@ class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(WaitForPrimaryPlacementSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForPrimaryPlacement' + self.kind = 'WaitForPrimaryPlacement' # type: str class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica to be moved out of the node - before starting an upgrade to ensure the availability of the primary - replica for the partition. + """Safety check that waits for the primary replica to be moved out of the node before starting an upgrade to ensure the availability of the primary replica for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. 
These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22868,21 +25000,26 @@ class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(WaitForPrimarySwapSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForPrimarySwap' + self.kind = 'WaitForPrimarySwap' # type: str class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the current reconfiguration of the partition to - be completed before starting an upgrade. + """Safety check that waits for the current reconfiguration of the partition to be completed before starting an upgrade. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". 
+ :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22895,40 +25032,45 @@ class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(WaitForReconfigurationSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForReconfiguration' + self.kind = 'WaitForReconfiguration' # type: str class WaitingChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos is waiting for the - cluster to become ready for faulting, for example, Chaos may be waiting for - the on-going upgrade to finish. + """Describes a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why the WaitingChaosEvent was generated, for - example, due to a cluster upgrade. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why the WaitingChaosEvent was generated, for example, due to a cluster + upgrade. 
:type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(WaitingChaosEvent, self).__init__(**kwargs) + self.kind = 'Waiting' # type: str self.reason = kwargs.get('reason', None) - self.kind = 'Waiting' diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py index 9ab5e9fe4058..9ea894eb0833 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py @@ -1,19 +1,21 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from msrest.serialization import Model -from msrest.exceptions import HttpOperationError +import datetime +from typing import Dict, List, Optional, Union +from azure.core.exceptions import HttpResponseError +import msrest.serialization -class AadMetadata(Model): +from ._service_fabric_client_apis_enums import * + + +class AadMetadata(msrest.serialization.Model): """Azure Active Directory metadata used for secured connection to cluster. :param authority: The AAD authority url. @@ -39,7 +41,17 @@ class AadMetadata(Model): 'tenant': {'key': 'tenant', 'type': 'str'}, } - def __init__(self, *, authority: str=None, client: str=None, cluster: str=None, login: str=None, redirect: str=None, tenant: str=None, **kwargs) -> None: + def __init__( + self, + *, + authority: Optional[str] = None, + client: Optional[str] = None, + cluster: Optional[str] = None, + login: Optional[str] = None, + redirect: Optional[str] = None, + tenant: Optional[str] = None, + **kwargs + ): super(AadMetadata, self).__init__(**kwargs) self.authority = authority self.client = client @@ -49,14 +61,12 @@ def __init__(self, *, authority: str=None, client: str=None, cluster: str=None, self.tenant = tenant -class AadMetadataObject(Model): - """Azure Active Directory metadata object used for secured connection to - cluster. +class AadMetadataObject(msrest.serialization.Model): + """Azure Active Directory metadata object used for secured connection to cluster. :param type: The client authentication method. :type type: str - :param metadata: Azure Active Directory metadata used for secured - connection to cluster. + :param metadata: Azure Active Directory metadata used for secured connection to cluster. 
:type metadata: ~azure.servicefabric.models.AadMetadata """ @@ -65,23 +75,30 @@ class AadMetadataObject(Model): 'metadata': {'key': 'metadata', 'type': 'AadMetadata'}, } - def __init__(self, *, type: str=None, metadata=None, **kwargs) -> None: + def __init__( + self, + *, + type: Optional[str] = None, + metadata: Optional["AadMetadata"] = None, + **kwargs + ): super(AadMetadataObject, self).__init__(**kwargs) self.type = type self.metadata = metadata -class ScalingMechanismDescription(Model): +class ScalingMechanismDescription(msrest.serialization.Model): """Describes the mechanism for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionInstanceCountScaleMechanism, - AddRemoveIncrementalNamedPartitionScalingMechanism + sub-classes are: AddRemoveIncrementalNamedPartitionScalingMechanism, PartitionInstanceCountScaleMechanism. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. + Possible values include: "Invalid", "PartitionInstanceCount", + "AddRemoveIncrementalNamedPartition". 
+ :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind """ _validation = { @@ -93,30 +110,32 @@ class ScalingMechanismDescription(Model): } _subtype_map = { - 'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'} + 'kind': {'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism', 'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ScalingMechanismDescription, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing named partitions of a - stateless service. Partition names are in the format '0','1''N-1'. + """Represents a scaling mechanism for adding or removing named partitions of a stateless service. Partition names are in the format '0','1''N-1'. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param min_partition_count: Required. Minimum number of named partitions - of the service. + :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. + Possible values include: "Invalid", "PartitionInstanceCount", + "AddRemoveIncrementalNamedPartition". + :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind + :param min_partition_count: Required. Minimum number of named partitions of the service. :type min_partition_count: int - :param max_partition_count: Required. Maximum number of named partitions - of the service. + :param max_partition_count: Required. Maximum number of named partitions of the service. :type max_partition_count: int - :param scale_increment: Required. 
The number of instances to add or remove - during a scaling operation. + :param scale_increment: Required. The number of instances to add or remove during a scaling + operation. :type scale_increment: int """ @@ -134,25 +153,32 @@ class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescrip 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__(self, *, min_partition_count: int, max_partition_count: int, scale_increment: int, **kwargs) -> None: + def __init__( + self, + *, + min_partition_count: int, + max_partition_count: int, + scale_increment: int, + **kwargs + ): super(AddRemoveIncrementalNamedPartitionScalingMechanism, self).__init__(**kwargs) + self.kind = 'AddRemoveIncrementalNamedPartition' # type: str self.min_partition_count = min_partition_count self.max_partition_count = max_partition_count self.scale_increment = scale_increment - self.kind = 'AddRemoveIncrementalNamedPartition' -class AutoScalingMechanism(Model): - """Describes the mechanism for performing auto scaling operation. Derived - classes will describe the actual mechanism. +class AutoScalingMechanism(msrest.serialization.Model): + """Describes the mechanism for performing auto scaling operation. Derived classes will describe the actual mechanism. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AddRemoveReplicaScalingMechanism + sub-classes are: AddRemoveReplicaScalingMechanism. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible + values include: "AddRemoveReplica". 
+ :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind """ _validation = { @@ -167,27 +193,30 @@ class AutoScalingMechanism(Model): 'kind': {'AddRemoveReplica': 'AddRemoveReplicaScalingMechanism'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(AutoScalingMechanism, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): - """Describes the horizontal auto scaling mechanism that adds or removes - replicas (containers or container groups). + """Describes the horizontal auto scaling mechanism that adds or removes replicas (containers or container groups). All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param min_count: Required. Minimum number of containers (scale down won't - be performed below this number). + :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible + values include: "AddRemoveReplica". + :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind + :param min_count: Required. Minimum number of containers (scale down won't be performed below + this number). :type min_count: int - :param max_count: Required. Maximum number of containers (scale up won't - be performed above this number). + :param max_count: Required. Maximum number of containers (scale up won't be performed above + this number). :type max_count: int - :param scale_increment: Required. Each time auto scaling is performed, - this number of containers will be added or removed. + :param scale_increment: Required. Each time auto scaling is performed, this number of + containers will be added or removed. 
:type scale_increment: int """ @@ -205,21 +234,28 @@ class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): 'scale_increment': {'key': 'scaleIncrement', 'type': 'int'}, } - def __init__(self, *, min_count: int, max_count: int, scale_increment: int, **kwargs) -> None: + def __init__( + self, + *, + min_count: int, + max_count: int, + scale_increment: int, + **kwargs + ): super(AddRemoveReplicaScalingMechanism, self).__init__(**kwargs) + self.kind = 'AddRemoveReplica' # type: str self.min_count = min_count self.max_count = max_count self.scale_increment = scale_increment - self.kind = 'AddRemoveReplica' -class AnalysisEventMetadata(Model): +class AnalysisEventMetadata(msrest.serialization.Model): """Metadata about an Analysis Event. :param delay: The analysis delay. - :type delay: timedelta + :type delay: ~datetime.timedelta :param duration: The duration of analysis. - :type duration: timedelta + :type duration: ~datetime.timedelta """ _attribute_map = { @@ -227,33 +263,38 @@ class AnalysisEventMetadata(Model): 'duration': {'key': 'Duration', 'type': 'duration'}, } - def __init__(self, *, delay=None, duration=None, **kwargs) -> None: + def __init__( + self, + *, + delay: Optional[datetime.timedelta] = None, + duration: Optional[datetime.timedelta] = None, + **kwargs + ): super(AnalysisEventMetadata, self).__init__(**kwargs) self.delay = delay self.duration = duration -class BackupConfigurationInfo(Model): +class BackupConfigurationInfo(msrest.serialization.Model): """Describes the backup configuration information. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupConfigurationInfo, - ServiceBackupConfigurationInfo, PartitionBackupConfigurationInfo + sub-classes are: ApplicationBackupConfigurationInfo, PartitionBackupConfigurationInfo, ServiceBackupConfigurationInfo. All required parameters must be populated in order to send to Azure. 
- :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -261,45 +302,49 @@ class BackupConfigurationInfo(Model): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo'} + 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo'} } - def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, **kwargs) -> None: + def __init__( + self, + *, + policy_name: Optional[str] = None, + policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, + suspension_info: Optional["BackupSuspensionInfo"] = None, + **kwargs + ): super(BackupConfigurationInfo, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.policy_name = policy_name self.policy_inherited_from = policy_inherited_from self.suspension_info = suspension_info - self.kind = None class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric application - specifying what backup policy is being applied and suspend description, if - any. + """Backup configuration information for a specific Service Fabric application specifying what backup policy is being applied and suspend description, if any. All required parameters must be populated in order to send to Azure. - :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. 
The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. - :type kind: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. 
:type application_name: str """ @@ -308,30 +353,39 @@ class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, application_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + policy_name: Optional[str] = None, + policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, + suspension_info: Optional["BackupSuspensionInfo"] = None, + application_name: Optional[str] = None, + **kwargs + ): super(ApplicationBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) + self.kind = 'Application' # type: str self.application_name = application_name - self.kind = 'Application' -class BackupEntity(Model): +class BackupEntity(msrest.serialization.Model): """Describes the Service Fabric entity that is configured for backup. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupEntity, ServiceBackupEntity, - PartitionBackupEntity + sub-classes are: ApplicationBackupEntity, PartitionBackupEntity, ServiceBackupEntity. All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". 
+ :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind """ _validation = { @@ -343,12 +397,15 @@ class BackupEntity(Model): } _subtype_map = { - 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'} + 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Partition': 'PartitionBackupEntity', 'Service': 'ServiceBackupEntity'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(BackupEntity, self).__init__(**kwargs) - self.entity_kind = None + self.entity_kind = None # type: Optional[str] class ApplicationBackupEntity(BackupEntity): @@ -356,10 +413,11 @@ class ApplicationBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. 
:type application_name: str """ @@ -372,38 +430,37 @@ class ApplicationBackupEntity(BackupEntity): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, *, application_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + application_name: Optional[str] = None, + **kwargs + ): super(ApplicationBackupEntity, self).__init__(**kwargs) + self.entity_kind = 'Application' # type: str self.application_name = application_name - self.entity_kind = 'Application' - - -class ApplicationCapacityDescription(Model): - """Describes capacity information for services of this application. This - description can be used for describing the following. - - Reserving the capacity for the services on the nodes - - Limiting the total number of nodes that services of this application can - run on - - Limiting the custom capacity metrics to limit the total consumption of - this metric by the services of this application. - - :param minimum_nodes: The minimum number of nodes where Service Fabric - will reserve capacity for this application. Note that this does not mean - that the services of this application will be placed on all of those - nodes. If this property is set to zero, no capacity will be reserved. The - value of this property cannot be more than the value of the MaximumNodes - property. + + +class ApplicationCapacityDescription(msrest.serialization.Model): + """Describes capacity information for services of this application. This description can be used for describing the following. + + +* Reserving the capacity for the services on the nodes +* Limiting the total number of nodes that services of this application can run on +* Limiting the custom capacity metrics to limit the total consumption of this metric by the services of this application. + + :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity + for this application. 
Note that this does not mean that the services of this application will + be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. + The value of this property cannot be more than the value of the MaximumNodes property. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where Service Fabric - will reserve capacity for this application. Note that this does not mean - that the services of this application will be placed on all of those - nodes. By default, the value of this property is zero and it means that - the services can be placed on any node. Default value: 0 . + :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity + for this application. Note that this does not mean that the services of this application will + be placed on all of those nodes. By default, the value of this property is zero and it means + that the services can be placed on any node. :type maximum_nodes: long - :param application_metrics: List of application capacity metric - description. - :type application_metrics: - list[~azure.servicefabric.models.ApplicationMetricDescription] + :param application_metrics: List of application capacity metric description. 
+ :type application_metrics: list[~azure.servicefabric.models.ApplicationMetricDescription] """ _validation = { @@ -417,127 +474,179 @@ class ApplicationCapacityDescription(Model): 'application_metrics': {'key': 'ApplicationMetrics', 'type': '[ApplicationMetricDescription]'}, } - def __init__(self, *, minimum_nodes: int=None, maximum_nodes: int=0, application_metrics=None, **kwargs) -> None: + def __init__( + self, + *, + minimum_nodes: Optional[int] = None, + maximum_nodes: Optional[int] = 0, + application_metrics: Optional[List["ApplicationMetricDescription"]] = None, + **kwargs + ): super(ApplicationCapacityDescription, self).__init__(**kwargs) self.minimum_nodes = minimum_nodes self.maximum_nodes = maximum_nodes self.application_metrics = application_metrics -class FabricEvent(Model): +class FabricEvent(msrest.serialization.Model): """Represents the base for all Fabric Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, - NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ApplicationEvent': 'ApplicationEvent', 'ClusterEvent': 'ClusterEvent', 'ContainerInstanceEvent': 'ContainerInstanceEvent', 'NodeEvent': 'NodeEvent', 'PartitionEvent': 'PartitionEvent', 'ReplicaEvent': 'ReplicaEvent', 'ServiceEvent': 'ServiceEvent'} } - def __init__(self, *, event_instance_id: str, time_stamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(FabricEvent, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.event_instance_id = event_instance_id self.category = category self.time_stamp = time_stamp self.has_correlated_events = has_correlated_events - self.kind = None class ApplicationEvent(FabricEvent): """Represents the base for all Application Events. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ApplicationCreatedEvent, ApplicationDeletedEvent, - ApplicationNewHealthReportEvent, ApplicationHealthReportExpiredEvent, - ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, - ApplicationUpgradeRollbackCompletedEvent, - ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, - DeployedApplicationNewHealthReportEvent, - DeployedApplicationHealthReportExpiredEvent, ApplicationProcessExitedEvent, - ApplicationContainerInstanceExitedEvent, - DeployedServicePackageNewHealthReportEvent, - DeployedServicePackageHealthReportExpiredEvent, - ChaosCodePackageRestartScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ApplicationContainerInstanceExitedEvent, ApplicationCreatedEvent, ApplicationDeletedEvent, ApplicationHealthReportExpiredEvent, ApplicationNewHealthReportEvent, ApplicationProcessExitedEvent, ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, ApplicationUpgradeRollbackCompletedEvent, ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, ChaosCodePackageRestartScheduledEvent, DeployedApplicationHealthReportExpiredEvent, DeployedApplicationNewHealthReportEvent, DeployedServicePackageHealthReportExpiredEvent, DeployedServicePackageNewHealthReportEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
:type application_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent'} - } - - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + 
'kind': {'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent'} + } + + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ApplicationEvent' # type: str self.application_id = application_id - self.kind = 'ApplicationEvent' class ApplicationContainerInstanceExitedEvent(ApplicationEvent): @@ -545,32 +654,50 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. 
+ :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service - package. + :param service_package_activation_id: Required. Activation Id of Service package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. :type is_exclusive: bool @@ -586,17 +713,16 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. 
Exit code of process. :type exit_code: long - :param unexpected_termination: Required. Indicates if termination is - unexpected. + :param unexpected_termination: Required. Indicates if termination is unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: datetime + :type start_time: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -613,11 +739,11 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -633,8 +759,30 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_name: str, service_package_name: str, service_package_activation_id: str, is_exclusive: bool, code_package_name: str, entry_point_type: str, image_name: str, container_name: str, host_id: str, exit_code: int, unexpected_termination: bool, start_time, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + service_name: str, + service_package_name: str, + 
service_package_activation_id: str, + is_exclusive: bool, + code_package_name: str, + entry_point_type: str, + image_name: str, + container_name: str, + host_id: str, + exit_code: int, + unexpected_termination: bool, + start_time: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationContainerInstanceExitedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationContainerInstanceExited' # type: str self.service_name = service_name self.service_package_name = service_package_name self.service_package_activation_id = service_package_activation_id @@ -647,7 +795,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, s self.exit_code = exit_code self.unexpected_termination = unexpected_termination self.start_time = start_time - self.kind = 'ApplicationContainerInstanceExited' class ApplicationCreatedEvent(ApplicationEvent): @@ -655,25 +802,44 @@ class ApplicationCreatedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -684,9 +850,9 @@ class ApplicationCreatedEvent(ApplicationEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -694,23 +860,35 @@ class ApplicationCreatedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, application_definition_kind: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + application_type_version: str, + application_definition_kind: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationCreatedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationCreated' # type: str self.application_type_name = application_type_name self.application_type_version = 
application_type_version self.application_definition_kind = application_definition_kind - self.kind = 'ApplicationCreated' class ApplicationDeletedEvent(ApplicationEvent): @@ -718,25 +896,44 @@ class ApplicationDeletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", 
"ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -745,62 +942,68 @@ class ApplicationDeletedEvent(ApplicationEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + application_type_version: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationDeletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationDeleted' # type: str self.application_type_name = application_type_name self.application_type_version = application_type_version - self.kind = 'ApplicationDeleted' -class ApplicationDescription(Model): +class ApplicationDescription(msrest.serialization.Model): """Describes a Service Fabric application. 
All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the - 'fabric:' URI scheme. + :param name: Required. The name of the application, including the 'fabric:' URI scheme. :type name: str - :param type_name: Required. The application type name as defined in the - application manifest. + :param type_name: Required. The application type name as defined in the application manifest. :type type_name: str - :param type_version: Required. The version of the application type as - defined in the application manifest. + :param type_version: Required. The version of the application type as defined in the + application manifest. :type type_version: str - :param parameter_list: List of application parameters with overridden - values from their default values specified in the application manifest. - :type parameter_list: - list[~azure.servicefabric.models.ApplicationParameter] - :param application_capacity: Describes capacity information for services - of this application. This description can be used for describing the - following. - - Reserving the capacity for the services on the nodes - - Limiting the total number of nodes that services of this application can - run on - - Limiting the custom capacity metrics to limit the total consumption of - this metric by the services of this application - :type application_capacity: - ~azure.servicefabric.models.ApplicationCapacityDescription - :param managed_application_identity: Managed application identity - description. + :param parameter_list: List of application parameters with overridden values from their default + values specified in the application manifest. + :type parameter_list: list[~azure.servicefabric.models.ApplicationParameter] + :param application_capacity: Describes capacity information for services of this application. + This description can be used for describing the following. 
+ + + * Reserving the capacity for the services on the nodes + * Limiting the total number of nodes that services of this application can run on + * Limiting the custom capacity metrics to limit the total consumption of this metric by the + services of this application. + :type application_capacity: ~azure.servicefabric.models.ApplicationCapacityDescription + :param managed_application_identity: Managed application identity description. :type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -820,7 +1023,17 @@ class ApplicationDescription(Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__(self, *, name: str, type_name: str, type_version: str, parameter_list=None, application_capacity=None, managed_application_identity=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + type_name: str, + type_version: str, + parameter_list: Optional[List["ApplicationParameter"]] = None, + application_capacity: Optional["ApplicationCapacityDescription"] = None, + managed_application_identity: Optional["ManagedApplicationIdentityDescription"] = None, + **kwargs + ): super(ApplicationDescription, self).__init__(**kwargs) self.name = name self.type_name = type_name @@ -830,26 +1043,23 @@ def __init__(self, *, name: str, type_name: str, type_version: str, parameter_li self.managed_application_identity = managed_application_identity -class EntityHealth(Model): - """Health information common to all entities in the cluster. It contains the - aggregated health state, health events and unhealthy evaluation. +class EntityHealth(msrest.serialization.Model): + """Health information common to all entities in the cluster. It contains the aggregated health state, health events and unhealthy evaluation. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. 
:type health_statistics: ~azure.servicefabric.models.HealthStatistics """ @@ -860,7 +1070,15 @@ class EntityHealth(Model): 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + **kwargs + ): super(EntityHealth, self).__init__(**kwargs) self.aggregated_health_state = aggregated_health_state self.health_events = health_events @@ -869,36 +1087,29 @@ def __init__(self, *, aggregated_health_state=None, health_events=None, unhealth class ApplicationHealth(EntityHealth): - """Represents the health of the application. Contains the application - aggregated health state and the service and deployed application health - states. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Represents the health of the application. Contains the application aggregated health state and the service and deployed application health states. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). 
+ The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str - :param service_health_states: Service health states as found in the health - store. - :type service_health_states: - list[~azure.servicefabric.models.ServiceHealthState] - :param deployed_application_health_states: Deployed application health - states as found in the health store. + :param service_health_states: Service health states as found in the health store. + :type service_health_states: list[~azure.servicefabric.models.ServiceHealthState] + :param deployed_application_health_states: Deployed application health states as found in the + health store. 
:type deployed_application_health_states: list[~azure.servicefabric.models.DeployedApplicationHealthState] """ @@ -913,44 +1124,49 @@ class ApplicationHealth(EntityHealth): 'deployed_application_health_states': {'key': 'DeployedApplicationHealthStates', 'type': '[DeployedApplicationHealthState]'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, service_health_states=None, deployed_application_health_states=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + name: Optional[str] = None, + service_health_states: Optional[List["ServiceHealthState"]] = None, + deployed_application_health_states: Optional[List["DeployedApplicationHealthState"]] = None, + **kwargs + ): super(ApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name self.service_health_states = service_health_states self.deployed_application_health_states = deployed_application_health_states -class HealthEvaluation(Model): - """Represents a health evaluation which describes the data and the algorithm - used by health manager to evaluate the health of an entity. +class HealthEvaluation(msrest.serialization.Model): + """Represents a health evaluation which describes the data and the algorithm used by health manager to evaluate the health of an entity. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ApplicationHealthEvaluation, ApplicationsHealthEvaluation, - ApplicationTypeApplicationsHealthEvaluation, - DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, - DeployedApplicationsHealthEvaluation, - DeployedServicePackageHealthEvaluation, - DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, - NodeHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, - PartitionsHealthEvaluation, ReplicaHealthEvaluation, - ReplicasHealthEvaluation, ServiceHealthEvaluation, - ServicesHealthEvaluation, SystemApplicationHealthEvaluation, - UpgradeDomainDeltaNodesCheckHealthEvaluation, - UpgradeDomainNodesHealthEvaluation - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + sub-classes are: ApplicationHealthEvaluation, ApplicationTypeApplicationsHealthEvaluation, ApplicationsHealthEvaluation, DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, DeployedApplicationsHealthEvaluation, DeployedServicePackageHealthEvaluation, DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, NodeHealthEvaluation, NodeTypeNodesHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, PartitionsHealthEvaluation, ReplicaHealthEvaluation, ReplicasHealthEvaluation, ServiceHealthEvaluation, ServicesHealthEvaluation, SystemApplicationHealthEvaluation, UpgradeDomainDeltaNodesCheckHealthEvaluation, UpgradeDomainNodesHealthEvaluation. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -958,49 +1174,56 @@ class HealthEvaluation(Model): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} + 'kind': {'Application': 'ApplicationHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'NodeTypeNodes': 
'NodeTypeNodesHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} } - def __init__(self, *, aggregated_health_state=None, description: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + **kwargs + ): super(HealthEvaluation, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.aggregated_health_state = aggregated_health_state self.description = description - self.kind = None class ApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for an application, containing information - about the data and the algorithm used by the health store to evaluate - health. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for an application, containing information about the data and the algorithm used by the health store to evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the application. The types of the - unhealthy evaluations can be DeployedApplicationsHealthEvaluation, - ServicesHealthEvaluation or EventHealthEvaluation. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the application. The types of the unhealthy evaluations can be + DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -1008,27 +1231,33 @@ class ApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, application_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + application_name: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Application' # type: str self.application_name = application_name self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Application' -class ApplicationHealthPolicies(Model): - """Defines the application health policy map used to evaluate the health of an - application or one of its children entities. 
+class ApplicationHealthPolicies(msrest.serialization.Model): + """Defines the application health policy map used to evaluate the health of an application or one of its children entities. - :param application_health_policy_map: The wrapper that contains the map - with application health policies used to evaluate specific applications in - the cluster. + :param application_health_policy_map: The wrapper that contains the map with application health + policies used to evaluate specific applications in the cluster. :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] """ @@ -1037,36 +1266,36 @@ class ApplicationHealthPolicies(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__(self, *, application_health_policy_map=None, **kwargs) -> None: + def __init__( + self, + *, + application_health_policy_map: Optional[List["ApplicationHealthPolicyMapItem"]] = None, + **kwargs + ): super(ApplicationHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = application_health_policy_map -class ApplicationHealthPolicy(Model): - """Defines a health policy used to evaluate the health of an application or - one of its children entities. +class ApplicationHealthPolicy(msrest.serialization.Model): + """Defines a health policy used to evaluate the health of an application or one of its children entities. - :param consider_warning_as_error: Indicates whether warnings are treated - with the same severity as errors. Default value: False . + :param consider_warning_as_error: Indicates whether warnings are treated with the same severity + as errors. :type consider_warning_as_error: bool - :param max_percent_unhealthy_deployed_applications: The maximum allowed - percentage of unhealthy deployed applications. Allowed values are Byte - values from zero to 100. 
- The percentage represents the maximum tolerated percentage of deployed - applications that can be unhealthy before the application is considered in - error. - This is calculated by dividing the number of unhealthy deployed - applications over the number of nodes where the application is currently - deployed on in the cluster. - The computation rounds up to tolerate one failure on small numbers of - nodes. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_deployed_applications: The maximum allowed percentage of unhealthy + deployed applications. Allowed values are Byte values from zero to 100. + The percentage represents the maximum tolerated percentage of deployed applications that can + be unhealthy before the application is considered in error. + This is calculated by dividing the number of unhealthy deployed applications over the number + of nodes where the application is currently deployed on in the cluster. + The computation rounds up to tolerate one failure on small numbers of nodes. Default + percentage is zero. :type max_percent_unhealthy_deployed_applications: int - :param default_service_type_health_policy: The health policy used by - default to evaluate the health of a service type. - :type default_service_type_health_policy: - ~azure.servicefabric.models.ServiceTypeHealthPolicy - :param service_type_health_policy_map: The map with service type health - policy per service type name. The map is empty by default. + :param default_service_type_health_policy: The health policy used by default to evaluate the + health of a service type. + :type default_service_type_health_policy: ~azure.servicefabric.models.ServiceTypeHealthPolicy + :param service_type_health_policy_map: The map with service type health policy per service type + name. The map is empty by default. 
:type service_type_health_policy_map: list[~azure.servicefabric.models.ServiceTypeHealthPolicyMapItem] """ @@ -1078,7 +1307,15 @@ class ApplicationHealthPolicy(Model): 'service_type_health_policy_map': {'key': 'ServiceTypeHealthPolicyMap', 'type': '[ServiceTypeHealthPolicyMapItem]'}, } - def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealthy_deployed_applications: int=0, default_service_type_health_policy=None, service_type_health_policy_map=None, **kwargs) -> None: + def __init__( + self, + *, + consider_warning_as_error: Optional[bool] = False, + max_percent_unhealthy_deployed_applications: Optional[int] = 0, + default_service_type_health_policy: Optional["ServiceTypeHealthPolicy"] = None, + service_type_health_policy_map: Optional[List["ServiceTypeHealthPolicyMapItem"]] = None, + **kwargs + ): super(ApplicationHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = consider_warning_as_error self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications @@ -1086,16 +1323,16 @@ def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealt self.service_type_health_policy_map = service_type_health_policy_map -class ApplicationHealthPolicyMapItem(Model): +class ApplicationHealthPolicyMapItem(msrest.serialization.Model): """Defines an item in ApplicationHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application health policy map item. - This is the name of the application. + :param key: Required. The key of the application health policy map item. This is the name of + the application. :type key: str - :param value: Required. The value of the application health policy map - item. This is the ApplicationHealthPolicy for this application. + :param value: Required. The value of the application health policy map item. This is the + ApplicationHealthPolicy for this application. 
:type value: ~azure.servicefabric.models.ApplicationHealthPolicy """ @@ -1109,24 +1346,28 @@ class ApplicationHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, *, key: str, value, **kwargs) -> None: + def __init__( + self, + *, + key: str, + value: "ApplicationHealthPolicy", + **kwargs + ): super(ApplicationHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value -class ApplicationHealthPolicyMapObject(Model): - """Represents the map of application health policies for a ServiceFabric - cluster upgrade. +class ApplicationHealthPolicyMapObject(msrest.serialization.Model): + """Represents the map of application health policies for a ServiceFabric cluster upgrade. - :param application_health_policy_map: Defines a map that contains specific - application health policies for different applications. - Each entry specifies as key the application name and as value an - ApplicationHealthPolicy used to evaluate the application health. - If an application is not specified in the map, the application health - evaluation uses the ApplicationHealthPolicy found in its application - manifest or the default application health policy (if no health policy is - defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific application health + policies for different applications. + Each entry specifies as key the application name and as value an ApplicationHealthPolicy used + to evaluate the application health. + If an application is not specified in the map, the application health evaluation uses the + ApplicationHealthPolicy found in its application manifest or the default application health + policy (if no health policy is defined in the manifest). The map is empty by default. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] @@ -1136,7 +1377,12 @@ class ApplicationHealthPolicyMapObject(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__(self, *, application_health_policy_map=None, **kwargs) -> None: + def __init__( + self, + *, + application_health_policy_map: Optional[List["ApplicationHealthPolicyMapItem"]] = None, + **kwargs + ): super(ApplicationHealthPolicyMapObject, self).__init__(**kwargs) self.application_health_policy_map = application_health_policy_map @@ -1146,25 +1392,44 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. 
This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1180,17 +1445,16 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1204,11 +1468,11 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1221,8 +1485,27 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 
'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationHealthReportExpired' # type: str self.application_instance_id = application_instance_id self.source_id = source_id self.property = property @@ -1232,42 +1515,39 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, a self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'ApplicationHealthReportExpired' -class EntityHealthState(Model): - """A base type for the health state of various entities in the cluster. It - contains the aggregated health state. +class EntityHealthState(msrest.serialization.Model): + """A base type for the health state of various entities in the cluster. It contains the aggregated health state. - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState """ _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + **kwargs + ): super(EntityHealthState, self).__init__(**kwargs) self.aggregated_health_state = aggregated_health_state class ApplicationHealthState(EntityHealthState): - """Represents the health state of an application, which contains the - application identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param name: The name of the application, including the 'fabric:' URI - scheme. + """Represents the health state of an application, which contains the application identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param name: The name of the application, including the 'fabric:' URI scheme. 
:type name: str """ @@ -1276,18 +1556,23 @@ class ApplicationHealthState(EntityHealthState): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, name: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + name: Optional[str] = None, + **kwargs + ): super(ApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.name = name -class EntityHealthStateChunk(Model): - """A base type for the health state chunk of various entities in the cluster. - It contains the aggregated health state. +class EntityHealthStateChunk(msrest.serialization.Model): + """A base type for the health state chunk of various entities in the cluster. It contains the aggregated health state. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -1295,35 +1580,33 @@ class EntityHealthStateChunk(Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__(self, *, health_state=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + **kwargs + ): super(EntityHealthStateChunk, self).__init__(**kwargs) self.health_state = health_state class ApplicationHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a application. 
- The application health state chunk contains the application name, its - aggregated health state and any children services and deployed applications - that respect the filters in cluster health chunk query description. +The application health state chunk contains the application name, its aggregated health state and any children services and deployed applications that respect the filters in cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param application_type_name: The application type name as defined in the - application manifest. + :param application_type_name: The application type name as defined in the application manifest. :type application_type_name: str - :param service_health_state_chunks: The list of service health state - chunks in the cluster that respect the filters in the cluster health chunk - query description. - :type service_health_state_chunks: - ~azure.servicefabric.models.ServiceHealthStateChunkList - :param deployed_application_health_state_chunks: The list of deployed - application health state chunks in the cluster that respect the filters in - the cluster health chunk query description. + :param service_health_state_chunks: The list of service health state chunks in the cluster that + respect the filters in the cluster health chunk query description. 
+ :type service_health_state_chunks: ~azure.servicefabric.models.ServiceHealthStateChunkList + :param deployed_application_health_state_chunks: The list of deployed application health state + chunks in the cluster that respect the filters in the cluster health chunk query description. :type deployed_application_health_state_chunks: ~azure.servicefabric.models.DeployedApplicationHealthStateChunkList """ @@ -1336,7 +1619,16 @@ class ApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_application_health_state_chunks': {'key': 'DeployedApplicationHealthStateChunks', 'type': 'DeployedApplicationHealthStateChunkList'}, } - def __init__(self, *, health_state=None, application_name: str=None, application_type_name: str=None, service_health_state_chunks=None, deployed_application_health_state_chunks=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + application_name: Optional[str] = None, + application_type_name: Optional[str] = None, + service_health_state_chunks: Optional["ServiceHealthStateChunkList"] = None, + deployed_application_health_state_chunks: Optional["DeployedApplicationHealthStateChunkList"] = None, + **kwargs + ): super(ApplicationHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.application_name = application_name self.application_type_name = application_type_name @@ -1344,12 +1636,11 @@ def __init__(self, *, health_state=None, application_name: str=None, application self.deployed_application_health_state_chunks = deployed_application_health_state_chunks -class EntityHealthStateChunkList(Model): - """A base type for the list of health state chunks found in the cluster. It - contains the total number of health states that match the input filters. +class EntityHealthStateChunkList(msrest.serialization.Model): + """A base type for the list of health state chunks found in the cluster. It contains the total number of health states that match the input filters. 
- :param total_count: Total number of entity health state objects that match - the specified filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match the specified + filters from the cluster health chunk query description. :type total_count: long """ @@ -1357,21 +1648,24 @@ class EntityHealthStateChunkList(Model): 'total_count': {'key': 'TotalCount', 'type': 'long'}, } - def __init__(self, *, total_count: int=None, **kwargs) -> None: + def __init__( + self, + *, + total_count: Optional[int] = None, + **kwargs + ): super(EntityHealthStateChunkList, self).__init__(**kwargs) self.total_count = total_count class ApplicationHealthStateChunkList(EntityHealthStateChunkList): - """The list of application health state chunks in the cluster that respect the - input filters in the chunk query. Returned by get cluster health state - chunks query. + """The list of application health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param total_count: Total number of entity health state objects that match - the specified filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match the specified + filters from the cluster health chunk query description. :type total_count: long - :param items: The list of application health state chunks that respect the - input filters in the chunk query. + :param items: The list of application health state chunks that respect the input filters in the + chunk query. 
:type items: list[~azure.servicefabric.models.ApplicationHealthStateChunk] """ @@ -1380,87 +1674,81 @@ class ApplicationHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[ApplicationHealthStateChunk]'}, } - def __init__(self, *, total_count: int=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + total_count: Optional[int] = None, + items: Optional[List["ApplicationHealthStateChunk"]] = None, + **kwargs + ): super(ApplicationHealthStateChunkList, self).__init__(total_count=total_count, **kwargs) self.items = items -class ApplicationHealthStateFilter(Model): - """Defines matching criteria to determine whether a application should be - included in the cluster health chunk. - One filter can match zero, one or multiple applications, depending on its - properties. +class ApplicationHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a application should be included in the cluster health chunk. +One filter can match zero, one or multiple applications, depending on its properties. - :param application_name_filter: The name of the application that matches - the filter, as a fabric uri. The filter is applied only to the specified - application, if it exists. - If the application doesn't exist, no application is returned in the + :param application_name_filter: The name of the application that matches the filter, as a + fabric uri. The filter is applied only to the specified application, if it exists. + If the application doesn't exist, no application is returned in the cluster health chunk based + on this filter. + If the application exists, it is included in the cluster health chunk if it respects the other + filter properties. + If not specified, all applications are matched against the other filter members, like health + state filter. + :type application_name_filter: str + :param application_type_name_filter: The name of the application type that matches the filter. 
+ If specified, the filter is applied only to applications of the selected application type, if + any exists. + If no applications of the specified application type exists, no application is returned in the cluster health chunk based on this filter. - If the application exists, it is included in the cluster health chunk if + Each application of the specified application type is included in the cluster health chunk if it respects the other filter properties. - If not specified, all applications are matched against the other filter - members, like health state filter. - :type application_name_filter: str - :param application_type_name_filter: The name of the application type that - matches the filter. - If specified, the filter is applied only to applications of the selected - application type, if any exists. - If no applications of the specified application type exists, no - application is returned in the cluster health chunk based on this filter. - Each application of the specified application type is included in the - cluster health chunk if it respects the other filter properties. - If not specified, all applications are matched against the other filter - members, like health state filter. + If not specified, all applications are matched against the other filter members, like health + state filter. :type application_type_name_filter: str - :param health_state_filter: The filter for the health state of the - applications. It allows selecting applications if they match the desired - health states. - The possible values are integer value of one of the following health - states. Only applications that match the filter are returned. All - applications are used to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the application name or - the application type name are specified. If the filter has default value - and application name is specified, the matching application is returned. 
- The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches applications with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the applications. It allows + selecting applications if they match the desired health states. + The possible values are integer value of one of the following health states. Only applications + that match the filter are returned. All applications are used to evaluate the cluster + aggregated health state. + If not specified, default value is None, unless the application name or the application type + name are specified. If the filter has default value and application name is specified, the + matching application is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches applications with HealthState value of OK + (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
+ * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param service_filters: Defines a list of filters that specify which - services to be included in the returned cluster health chunk as children - of the application. The services are returned only if the parent - application matches a filter. - If the list is empty, no services are returned. All the services are used - to evaluate the parent application aggregated health state, regardless of - the input filters. + :param service_filters: Defines a list of filters that specify which services to be included in + the returned cluster health chunk as children of the application. The services are returned + only if the parent application matches a filter. + If the list is empty, no services are returned. All the services are used to evaluate the + parent application aggregated health state, regardless of the input filters. The application filter may specify multiple service filters. - For example, it can specify a filter to return all services with health - state Error and another filter to always include a service identified by - its service name. - :type service_filters: - list[~azure.servicefabric.models.ServiceHealthStateFilter] - :param deployed_application_filters: Defines a list of filters that - specify which deployed applications to be included in the returned cluster - health chunk as children of the application. The deployed applications are - returned only if the parent application matches a filter. - If the list is empty, no deployed applications are returned. All the - deployed applications are used to evaluate the parent application - aggregated health state, regardless of the input filters. 
+ For example, it can specify a filter to return all services with health state Error and + another filter to always include a service identified by its service name. + :type service_filters: list[~azure.servicefabric.models.ServiceHealthStateFilter] + :param deployed_application_filters: Defines a list of filters that specify which deployed + applications to be included in the returned cluster health chunk as children of the + application. The deployed applications are returned only if the parent application matches a + filter. + If the list is empty, no deployed applications are returned. All the deployed applications are + used to evaluate the parent application aggregated health state, regardless of the input + filters. The application filter may specify multiple deployed application filters. - For example, it can specify a filter to return all deployed applications - with health state Error and another filter to always include a deployed - application on a specified node. + For example, it can specify a filter to return all deployed applications with health state + Error and another filter to always include a deployed application on a specified node. 
:type deployed_application_filters: list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter] """ @@ -1473,7 +1761,16 @@ class ApplicationHealthStateFilter(Model): 'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'}, } - def __init__(self, *, application_name_filter: str=None, application_type_name_filter: str=None, health_state_filter: int=0, service_filters=None, deployed_application_filters=None, **kwargs) -> None: + def __init__( + self, + *, + application_name_filter: Optional[str] = None, + application_type_name_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + service_filters: Optional[List["ServiceHealthStateFilter"]] = None, + deployed_application_filters: Optional[List["DeployedApplicationHealthStateFilter"]] = None, + **kwargs + ): super(ApplicationHealthStateFilter, self).__init__(**kwargs) self.application_name_filter = application_name_filter self.application_type_name_filter = application_type_name_filter @@ -1482,41 +1779,38 @@ def __init__(self, *, application_name_filter: str=None, application_type_name_f self.deployed_application_filters = deployed_application_filters -class ApplicationInfo(Model): +class ApplicationInfo(msrest.serialization.Model): """Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. + :param type_name: The application type name as defined in the application manifest. :type type_name: str - :param type_version: The version of the application type as defined in the - application manifest. + :param type_version: The version of the application type as defined in the application + manifest. :type type_version: str - :param status: The status of the application. Possible values include: - 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :param status: The status of the application. Possible values include: "Invalid", "Ready", + "Upgrading", "Creating", "Deleting", "Failed". :type status: str or ~azure.servicefabric.models.ApplicationStatus - :param parameters: List of application parameters with overridden values - from their default values specified in the application manifest. + :param parameters: List of application parameters with overridden values from their default + values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
:type health_state: str or ~azure.servicefabric.models.HealthState - :param application_definition_kind: The mechanism used to define a Service - Fabric application. Possible values include: 'Invalid', - 'ServiceFabricApplicationDescription', 'Compose' - :type application_definition_kind: str or - ~azure.servicefabric.models.ApplicationDefinitionKind + :param application_definition_kind: The mechanism used to define a Service Fabric application. + Possible values include: "Invalid", "ServiceFabricApplicationDescription", "Compose". + :type application_definition_kind: str or ~azure.servicefabric.models.ApplicationDefinitionKind + :param managed_application_identity: Managed application identity description. + :type managed_application_identity: + ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ _attribute_map = { @@ -1528,9 +1822,23 @@ class ApplicationInfo(Model): 'parameters': {'key': 'Parameters', 'type': '[ApplicationParameter]'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, + 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__(self, *, id: str=None, name: str=None, type_name: str=None, type_version: str=None, status=None, parameters=None, health_state=None, application_definition_kind=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + type_name: Optional[str] = None, + type_version: Optional[str] = None, + status: Optional[Union[str, "ApplicationStatus"]] = None, + parameters: Optional[List["ApplicationParameter"]] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + application_definition_kind: Optional[Union[str, "ApplicationDefinitionKind"]] = None, + managed_application_identity: Optional["ManagedApplicationIdentityDescription"] = None, + **kwargs + ): super(ApplicationInfo, 
self).__init__(**kwargs) self.id = id self.name = name @@ -1540,39 +1848,31 @@ def __init__(self, *, id: str=None, name: str=None, type_name: str=None, type_ve self.parameters = parameters self.health_state = health_state self.application_definition_kind = application_definition_kind + self.managed_application_identity = managed_application_identity -class ApplicationLoadInfo(Model): +class ApplicationLoadInfo(msrest.serialization.Model): """Load Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str :param minimum_nodes: The minimum number of nodes for this application. - It is the number of nodes where Service Fabric will reserve Capacity in - the cluster which equals to ReservedLoad * MinimumNodes for this - Application instance. - For applications that do not have application capacity defined this value - will be zero. + It is the number of nodes where Service Fabric will reserve Capacity in the cluster which + equals to ReservedLoad * MinimumNodes for this Application instance. + For applications that do not have application capacity defined this value will be zero. 
:type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where this application - can be instantiated. + :param maximum_nodes: The maximum number of nodes where this application can be instantiated. It is the number of nodes this application is allowed to span. - For applications that do not have application capacity defined this value - will be zero. + For applications that do not have application capacity defined this value will be zero. :type maximum_nodes: long - :param node_count: The number of nodes on which this application is - instantiated. - For applications that do not have application capacity defined this value - will be zero. + :param node_count: The number of nodes on which this application is instantiated. + For applications that do not have application capacity defined this value will be zero. :type node_count: long - :param application_load_metric_information: List of application load - metric information. + :param application_load_metric_information: List of application load metric information. 
:type application_load_metric_information: list[~azure.servicefabric.models.ApplicationLoadMetricInformation] """ @@ -1585,7 +1885,16 @@ class ApplicationLoadInfo(Model): 'application_load_metric_information': {'key': 'ApplicationLoadMetricInformation', 'type': '[ApplicationLoadMetricInformation]'}, } - def __init__(self, *, id: str=None, minimum_nodes: int=None, maximum_nodes: int=None, node_count: int=None, application_load_metric_information=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + minimum_nodes: Optional[int] = None, + maximum_nodes: Optional[int] = None, + node_count: Optional[int] = None, + application_load_metric_information: Optional[List["ApplicationLoadMetricInformation"]] = None, + **kwargs + ): super(ApplicationLoadInfo, self).__init__(**kwargs) self.id = id self.minimum_nodes = minimum_nodes @@ -1594,26 +1903,20 @@ def __init__(self, *, id: str=None, minimum_nodes: int=None, maximum_nodes: int= self.application_load_metric_information = application_load_metric_information -class ApplicationLoadMetricInformation(Model): - """Describes load information for a custom resource balancing metric. This can - be used to limit the total consumption of this metric by the services of - this application. +class ApplicationLoadMetricInformation(msrest.serialization.Model): + """Describes load information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. :param name: The name of the metric. :type name: str - :param reservation_capacity: This is the capacity reserved in the cluster - for the application. + :param reservation_capacity: This is the capacity reserved in the cluster for the application. It's the product of NodeReservationCapacity and MinimumNodes. If set to zero, no capacity is reserved for this metric. 
- When setting application capacity or when updating application capacity - this value must be smaller than or equal to MaximumCapacity for each - metric. + When setting application capacity or when updating application capacity this value must be + smaller than or equal to MaximumCapacity for each metric. :type reservation_capacity: long - :param application_capacity: Total capacity for this metric in this - application instance. + :param application_capacity: Total capacity for this metric in this application instance. :type application_capacity: long - :param application_load: Current load for this metric in this application - instance. + :param application_load: Current load for this metric in this application instance. :type application_load: long """ @@ -1624,7 +1927,15 @@ class ApplicationLoadMetricInformation(Model): 'application_load': {'key': 'ApplicationLoad', 'type': 'long'}, } - def __init__(self, *, name: str=None, reservation_capacity: int=None, application_capacity: int=None, application_load: int=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + reservation_capacity: Optional[int] = None, + application_capacity: Optional[int] = None, + application_load: Optional[int] = None, + **kwargs + ): super(ApplicationLoadMetricInformation, self).__init__(**kwargs) self.name = name self.reservation_capacity = reservation_capacity @@ -1632,46 +1943,35 @@ def __init__(self, *, name: str=None, reservation_capacity: int=None, applicatio self.application_load = application_load -class ApplicationMetricDescription(Model): - """Describes capacity information for a custom resource balancing metric. This - can be used to limit the total consumption of this metric by the services - of this application. +class ApplicationMetricDescription(msrest.serialization.Model): + """Describes capacity information for a custom resource balancing metric. 
This can be used to limit the total consumption of this metric by the services of this application. :param name: The name of the metric. :type name: str - :param maximum_capacity: The maximum node capacity for Service Fabric - application. - This is the maximum Load for an instance of this application on a single - node. Even if the capacity of node is greater than this value, Service - Fabric will limit the total load of services within the application on - each node to this value. + :param maximum_capacity: The maximum node capacity for Service Fabric application. + This is the maximum Load for an instance of this application on a single node. Even if the + capacity of node is greater than this value, Service Fabric will limit the total load of + services within the application on each node to this value. If set to zero, capacity for this metric is unlimited on each node. - When creating a new application with application capacity defined, the - product of MaximumNodes and this value must always be smaller than or - equal to TotalApplicationCapacity. - When updating existing application with application capacity, the product - of MaximumNodes and this value must always be smaller than or equal to - TotalApplicationCapacity. + When creating a new application with application capacity defined, the product of MaximumNodes + and this value must always be smaller than or equal to TotalApplicationCapacity. + When updating existing application with application capacity, the product of MaximumNodes and + this value must always be smaller than or equal to TotalApplicationCapacity. :type maximum_capacity: long - :param reservation_capacity: The node reservation capacity for Service - Fabric application. - This is the amount of load which is reserved on nodes which have instances - of this application. - If MinimumNodes is specified, then the product of these values will be the - capacity reserved in the cluster for the application. 
+ :param reservation_capacity: The node reservation capacity for Service Fabric application. + This is the amount of load which is reserved on nodes which have instances of this + application. + If MinimumNodes is specified, then the product of these values will be the capacity reserved + in the cluster for the application. If set to zero, no capacity is reserved for this metric. - When setting application capacity or when updating application capacity; - this value must be smaller than or equal to MaximumCapacity for each - metric. + When setting application capacity or when updating application capacity; this value must be + smaller than or equal to MaximumCapacity for each metric. :type reservation_capacity: long - :param total_application_capacity: The total metric capacity for Service - Fabric application. - This is the total metric capacity for this application in the cluster. - Service Fabric will try to limit the sum of loads of services within the - application to this value. - When creating a new application with application capacity defined, the - product of MaximumNodes and MaximumCapacity must always be smaller than or - equal to this value. + :param total_application_capacity: The total metric capacity for Service Fabric application. + This is the total metric capacity for this application in the cluster. Service Fabric will try + to limit the sum of loads of services within the application to this value. + When creating a new application with application capacity defined, the product of MaximumNodes + and MaximumCapacity must always be smaller than or equal to this value. 
:type total_application_capacity: long """ @@ -1682,7 +1982,15 @@ class ApplicationMetricDescription(Model): 'total_application_capacity': {'key': 'TotalApplicationCapacity', 'type': 'long'}, } - def __init__(self, *, name: str=None, maximum_capacity: int=None, reservation_capacity: int=None, total_application_capacity: int=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + maximum_capacity: Optional[int] = None, + reservation_capacity: Optional[int] = None, + total_application_capacity: Optional[int] = None, + **kwargs + ): super(ApplicationMetricDescription, self).__init__(**kwargs) self.name = name self.maximum_capacity = maximum_capacity @@ -1690,19 +1998,16 @@ def __init__(self, *, name: str=None, maximum_capacity: int=None, reservation_ca self.total_application_capacity = total_application_capacity -class ApplicationNameInfo(Model): +class ApplicationNameInfo(msrest.serialization.Model): """Information about the application name. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI - scheme. 
+ :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str """ @@ -1711,7 +2016,13 @@ class ApplicationNameInfo(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + **kwargs + ): super(ApplicationNameInfo, self).__init__(**kwargs) self.id = id self.name = name @@ -1722,25 +2033,44 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1756,17 +2086,16 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1780,11 +2109,11 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1797,8 +2126,27 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, source_id: str, property: str, 
health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationNewHealthReport' # type: str self.application_instance_id = application_instance_id self.source_id = source_id self.property = property @@ -1808,12 +2156,10 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, a self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'ApplicationNewHealthReport' -class ApplicationParameter(Model): - """Describes an application parameter override to be applied when creating or - upgrading an application. +class ApplicationParameter(msrest.serialization.Model): + """Describes an application parameter override to be applied when creating or upgrading an application. All required parameters must be populated in order to send to Azure. 
@@ -1833,7 +2179,13 @@ class ApplicationParameter(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, *, key: str, value: str, **kwargs) -> None: + def __init__( + self, + *, + key: str, + value: str, + **kwargs + ): super(ApplicationParameter, self).__init__(**kwargs) self.key = key self.value = value @@ -1844,32 +2196,50 @@ class ApplicationProcessExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
:type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service - package. + :param service_package_activation_id: Required. Activation Id of Service package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. :type is_exclusive: bool @@ -1885,17 +2255,16 @@ class ApplicationProcessExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. :type exit_code: long - :param unexpected_termination: Required. Indicates if termination is - unexpected. + :param unexpected_termination: Required. Indicates if termination is unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: datetime + :type start_time: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -1912,11 +2281,11 @@ class ApplicationProcessExitedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -1932,8 +2301,30 @@ class ApplicationProcessExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } 
- def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_name: str, service_package_name: str, service_package_activation_id: str, is_exclusive: bool, code_package_name: str, entry_point_type: str, exe_name: str, process_id: int, host_id: str, exit_code: int, unexpected_termination: bool, start_time, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + service_name: str, + service_package_name: str, + service_package_activation_id: str, + is_exclusive: bool, + code_package_name: str, + entry_point_type: str, + exe_name: str, + process_id: int, + host_id: str, + exit_code: int, + unexpected_termination: bool, + start_time: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationProcessExitedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationProcessExited' # type: str self.service_name = service_name self.service_package_name = service_package_name self.service_package_activation_id = service_package_activation_id @@ -1946,50 +2337,44 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, s self.exit_code = exit_code self.unexpected_termination = unexpected_termination self.start_time = start_time - self.kind = 'ApplicationProcessExited' -class ApplicationResourceDescription(Model): +class ApplicationResourceDescription(msrest.serialization.Model): """This type describes a application resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. 
:param name: Required. Name of the Application resource. :type name: str + :param identity: Describes the identity of the application. + :type identity: ~azure.servicefabric.models.IdentityDescription :param description: User readable description of the application. :type description: str - :param services: Describes the services in the application. This property - is used to create or modify services of the application. On get only the - name of the service is returned. The service description can be obtained - by querying for the service resource. - :type services: - list[~azure.servicefabric.models.ServiceResourceDescription] - :param diagnostics: Describes the diagnostics definition and usage for an - application resource. + :param services: Describes the services in the application. This property is used to create or + modify services of the application. On get only the name of the service is returned. The + service description can be obtained by querying for the service resource. + :type services: list[~azure.servicefabric.models.ServiceResourceDescription] + :param diagnostics: Describes the diagnostics definition and usage for an application resource. :type diagnostics: ~azure.servicefabric.models.DiagnosticsDescription - :param debug_params: Internal - used by Visual Studio to setup the - debugging session on the local development environment. + :param debug_params: Internal - used by Visual Studio to setup the debugging session on the + local development environment. :type debug_params: str :ivar service_names: Names of the services in the application. :vartype service_names: list[str] - :ivar status: Status of the application. Possible values include: - 'Unknown', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the application. Possible values include: "Unknown", "Ready", + "Upgrading", "Creating", "Deleting", "Failed". 
:vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the application. + :ivar status_details: Gives additional information about the current status of the application. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the application's health state is not - 'Ok', this additional details from service fabric Health Manager for the - user to know why the application is marked unhealthy. + :ivar unhealthy_evaluation: When the application's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the application is marked + unhealthy. :vartype unhealthy_evaluation: str - :param identity: Describes the identity of the application. 
- :type identity: ~azure.servicefabric.models.IdentityDescription """ _validation = { @@ -2003,6 +2388,7 @@ class ApplicationResourceDescription(Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, + 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'services': {'key': 'properties.services', 'type': '[ServiceResourceDescription]'}, 'diagnostics': {'key': 'properties.diagnostics', 'type': 'DiagnosticsDescription'}, @@ -2012,12 +2398,22 @@ class ApplicationResourceDescription(Model): 'status_details': {'key': 'properties.statusDetails', 'type': 'str'}, 'health_state': {'key': 'properties.healthState', 'type': 'str'}, 'unhealthy_evaluation': {'key': 'properties.unhealthyEvaluation', 'type': 'str'}, - 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, } - def __init__(self, *, name: str, description: str=None, services=None, diagnostics=None, debug_params: str=None, identity=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + identity: Optional["IdentityDescription"] = None, + description: Optional[str] = None, + services: Optional[List["ServiceResourceDescription"]] = None, + diagnostics: Optional["DiagnosticsDescription"] = None, + debug_params: Optional[str] = None, + **kwargs + ): super(ApplicationResourceDescription, self).__init__(**kwargs) self.name = name + self.identity = identity self.description = description self.services = services self.diagnostics = diagnostics @@ -2027,56 +2423,45 @@ def __init__(self, *, name: str, description: str=None, services=None, diagnosti self.status_details = None self.health_state = None self.unhealthy_evaluation = None - self.identity = identity -class ApplicationResourceUpgradeProgressInfo(Model): +class ApplicationResourceUpgradeProgressInfo(msrest.serialization.Model): """This type describes an application resource upgrade. :param name: Name of the Application resource. 
:type name: str - :param target_application_type_version: The target application version for - the application upgrade. + :param target_application_type_version: The target application version for the application + upgrade. :type target_application_type_version: str - :param start_timestamp_utc: The estimated UTC datetime when the upgrade - started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. :type start_timestamp_utc: str - :param upgrade_state: The state of the application resource upgrade. - Possible values include: 'Invalid', 'ProvisioningTarget', - 'RollingForward', 'UnprovisioningCurrent', 'CompletedRollforward', - 'RollingBack', 'UnprovisioningTarget', 'CompletedRollback', 'Failed' - :type upgrade_state: str or - ~azure.servicefabric.models.ApplicationResourceUpgradeState - :param percent_completed: The estimated percent of replicas are completed - in the upgrade. + :param upgrade_state: The state of the application resource upgrade. Possible values include: + "Invalid", "ProvisioningTarget", "RollingForward", "UnprovisioningCurrent", + "CompletedRollforward", "RollingBack", "UnprovisioningTarget", "CompletedRollback", "Failed". + :type upgrade_state: str or ~azure.servicefabric.models.ApplicationResourceUpgradeState + :param percent_completed: The estimated percent of replicas are completed in the upgrade. :type percent_completed: str :param service_upgrade_progress: List of service upgrade progresses. - :type service_upgrade_progress: - list[~azure.servicefabric.models.ServiceUpgradeProgress] - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "Monitored" . 
- :type rolling_upgrade_mode: str or - ~azure.servicefabric.models.RollingUpgradeMode - :param upgrade_duration: The estimated amount of time that the overall - upgrade elapsed. It is first interpreted as a string representing an ISO - 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. Default value: "PT0H2M0S" . + :type service_upgrade_progress: list[~azure.servicefabric.models.ServiceUpgradeProgress] + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: "Monitored". + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.RollingUpgradeMode + :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type upgrade_duration: str - :param application_upgrade_status_details: Additional detailed information - about the status of the pending upgrade. + :param application_upgrade_status_details: Additional detailed information about the status of + the pending upgrade. :type application_upgrade_status_details: str - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). Default value: 42949672925 . 
+ :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade - failed and FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and + FailureAction was executed. :type failure_timestamp_utc: str """ @@ -2094,7 +2479,22 @@ class ApplicationResourceUpgradeProgressInfo(Model): 'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'}, } - def __init__(self, *, name: str=None, target_application_type_version: str=None, start_timestamp_utc: str=None, upgrade_state=None, percent_completed: str=None, service_upgrade_progress=None, rolling_upgrade_mode="Monitored", upgrade_duration: str="PT0H2M0S", application_upgrade_status_details: str=None, upgrade_replica_set_check_timeout_in_seconds: int=42949672925, failure_timestamp_utc: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + target_application_type_version: Optional[str] = None, + start_timestamp_utc: Optional[str] = None, + upgrade_state: Optional[Union[str, "ApplicationResourceUpgradeState"]] = None, + percent_completed: Optional[str] = None, + service_upgrade_progress: Optional[List["ServiceUpgradeProgress"]] = None, + rolling_upgrade_mode: Optional[Union[str, "RollingUpgradeMode"]] = "Monitored", + upgrade_duration: Optional[str] = "PT0H2M0S", + application_upgrade_status_details: Optional[str] = None, + upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, + 
failure_timestamp_utc: Optional[str] = None, + **kwargs + ): super(ApplicationResourceUpgradeProgressInfo, self).__init__(**kwargs) self.name = name self.target_application_type_version = target_application_type_version @@ -2109,18 +2509,17 @@ def __init__(self, *, name: str=None, target_application_type_version: str=None, self.failure_timestamp_utc = failure_timestamp_utc -class VolumeReference(Model): +class VolumeReference(msrest.serialization.Model): """Describes a reference to a volume resource. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. - Default is 'false'. + :param read_only: The flag indicating whether the volume is read only. Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which - the volume should be mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which the volume should be + mounted. Only valid path characters are allowed. :type destination_path: str """ @@ -2135,7 +2534,14 @@ class VolumeReference(Model): 'destination_path': {'key': 'destinationPath', 'type': 'str'}, } - def __init__(self, *, name: str, destination_path: str, read_only: bool=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + destination_path: str, + read_only: Optional[bool] = None, + **kwargs + ): super(VolumeReference, self).__init__(**kwargs) self.name = name self.read_only = read_only @@ -2149,14 +2555,13 @@ class ApplicationScopedVolume(VolumeReference): :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. - Default is 'false'. + :param read_only: The flag indicating whether the volume is read only. Default is 'false'. 
:type read_only: bool - :param destination_path: Required. The path within the container at which - the volume should be mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which the volume should be + mounted. Only valid path characters are allowed. :type destination_path: str - :param creation_parameters: Required. Describes parameters for creating - application-scoped volumes. + :param creation_parameters: Required. Describes parameters for creating application-scoped + volumes. :type creation_parameters: ~azure.servicefabric.models.ApplicationScopedVolumeCreationParameters """ @@ -2174,24 +2579,32 @@ class ApplicationScopedVolume(VolumeReference): 'creation_parameters': {'key': 'creationParameters', 'type': 'ApplicationScopedVolumeCreationParameters'}, } - def __init__(self, *, name: str, destination_path: str, creation_parameters, read_only: bool=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + destination_path: str, + creation_parameters: "ApplicationScopedVolumeCreationParameters", + read_only: Optional[bool] = None, + **kwargs + ): super(ApplicationScopedVolume, self).__init__(name=name, read_only=read_only, destination_path=destination_path, **kwargs) self.creation_parameters = creation_parameters -class ApplicationScopedVolumeCreationParameters(Model): +class ApplicationScopedVolumeCreationParameters(msrest.serialization.Model): """Describes parameters for creating application-scoped volumes. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: - ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk + sub-classes are: ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk. All required parameters must be populated in order to send to Azure. + :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. + Possible values include: "ServiceFabricVolumeDisk". 
+ :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { @@ -2199,32 +2612,36 @@ class ApplicationScopedVolumeCreationParameters(Model): } _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, } _subtype_map = { 'kind': {'ServiceFabricVolumeDisk': 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk'} } - def __init__(self, *, description: str=None, **kwargs) -> None: + def __init__( + self, + *, + description: Optional[str] = None, + **kwargs + ): super(ApplicationScopedVolumeCreationParameters, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.description = description - self.kind = None class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(ApplicationScopedVolumeCreationParameters): - """Describes parameters for creating application-scoped volumes provided by - Service Fabric Volume Disks. + """Describes parameters for creating application-scoped volumes provided by Service Fabric Volume Disks. All required parameters must be populated in order to send to Azure. + :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. + Possible values include: "ServiceFabricVolumeDisk". + :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param size_disk: Required. Volume size. Possible values include: 'Small', - 'Medium', 'Large' + :param size_disk: Required. Volume size. Possible values include: "Small", "Medium", "Large". 
:type size_disk: str or ~azure.servicefabric.models.SizeTypes """ @@ -2234,45 +2651,54 @@ class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(Applicati } _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, 'size_disk': {'key': 'sizeDisk', 'type': 'str'}, } - def __init__(self, *, size_disk, description: str=None, **kwargs) -> None: + def __init__( + self, + *, + size_disk: Union[str, "SizeTypes"], + description: Optional[str] = None, + **kwargs + ): super(ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk, self).__init__(description=description, **kwargs) + self.kind = 'ServiceFabricVolumeDisk' # type: str self.size_disk = size_disk - self.kind = 'ServiceFabricVolumeDisk' class ApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications, containing health - evaluations for each unhealthy application that impacted current aggregated - health state. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for applications, containing health evaluations for each unhealthy application that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. 
This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of - unhealthy applications from the ClusterHealthPolicy. + :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications + from the ClusterHealthPolicy. :type max_percent_unhealthy_applications: int :param total_count: Total number of applications from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ApplicationHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. 
Includes all the unhealthy ApplicationHealthEvaluation that impacted the aggregated + health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2280,59 +2706,65 @@ class ApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + max_percent_unhealthy_applications: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Applications' # type: str self.max_percent_unhealthy_applications = max_percent_unhealthy_applications self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Applications' class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications of a particular application - type. The application type applications evaluation can be returned when - cluster health evaluation returns unhealthy aggregated health state, either - Error or Warning. 
It contains health evaluations for each unhealthy - application of the included application type that impacted current - aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for applications of a particular application type. The application type applications evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy application of the included application type that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". 
+ :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param application_type_name: The application type name as defined in the - application manifest. + :param application_type_name: The application type name as defined in the application manifest. :type application_type_name: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of - unhealthy applications for the application type, specified as an entry in - ApplicationTypeHealthPolicyMap. + :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications + for the application type, specified as an entry in ApplicationTypeHealthPolicyMap. :type max_percent_unhealthy_applications: int - :param total_count: Total number of applications of the application type - found in the health store. + :param total_count: Total number of applications of the application type found in the health + store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ApplicationHealthEvaluation of this application type that impacted the - aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. 
Includes all the unhealthy ApplicationHealthEvaluation of this application type that + impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2340,36 +2772,45 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, application_type_name: str=None, max_percent_unhealthy_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + application_type_name: Optional[str] = None, + max_percent_unhealthy_applications: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'ApplicationTypeApplications' # type: str self.application_type_name = application_type_name self.max_percent_unhealthy_applications = max_percent_unhealthy_applications self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'ApplicationTypeApplications' -class ApplicationTypeHealthPolicyMapItem(Model): +class 
ApplicationTypeHealthPolicyMapItem(msrest.serialization.Model): """Defines an item in ApplicationTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application type health policy map - item. This is the name of the application type. + :param key: Required. The key of the application type health policy map item. This is the name + of the application type. :type key: str - :param value: Required. The value of the application type health policy - map item. - The max percent unhealthy applications allowed for the application type. - Must be between zero and 100. + :param value: Required. The value of the application type health policy map item. + The max percent unhealthy applications allowed for the application type. Must be between zero + and 100. :type value: int """ @@ -2383,20 +2824,25 @@ class ApplicationTypeHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__(self, *, key: str, value: int, **kwargs) -> None: + def __init__( + self, + *, + key: str, + value: int, + **kwargs + ): super(ApplicationTypeHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value -class ApplicationTypeImageStorePath(Model): - """Path description for the application package in the image store specified - during the prior copy operation. +class ApplicationTypeImageStorePath(msrest.serialization.Model): + """Path description for the application package in the image store specified during the prior copy operation. All required parameters must be populated in order to send to Azure. - :param application_type_build_path: Required. The relative image store - path to the application package. + :param application_type_build_path: Required. The relative image store path to the application + package. 
:type application_type_build_path: str """ @@ -2408,34 +2854,35 @@ class ApplicationTypeImageStorePath(Model): 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, } - def __init__(self, *, application_type_build_path: str, **kwargs) -> None: + def __init__( + self, + *, + application_type_build_path: str, + **kwargs + ): super(ApplicationTypeImageStorePath, self).__init__(**kwargs) self.application_type_build_path = application_type_build_path -class ApplicationTypeInfo(Model): +class ApplicationTypeInfo(msrest.serialization.Model): """Information about an application type. - :param name: The application type name as defined in the application - manifest. + :param name: The application type name as defined in the application manifest. :type name: str - :param version: The version of the application type as defined in the - application manifest. + :param version: The version of the application type as defined in the application manifest. :type version: str - :param default_parameter_list: List of application type parameters that - can be overridden when creating or updating the application. - :type default_parameter_list: - list[~azure.servicefabric.models.ApplicationParameter] - :param status: The status of the application type. Possible values - include: 'Invalid', 'Provisioning', 'Available', 'Unprovisioning', - 'Failed' + :param default_parameter_list: List of application type parameters that can be overridden when + creating or updating the application. + :type default_parameter_list: list[~azure.servicefabric.models.ApplicationParameter] + :param status: The status of the application type. Possible values include: "Invalid", + "Provisioning", "Available", "Unprovisioning", "Failed". :type status: str or ~azure.servicefabric.models.ApplicationTypeStatus - :param status_details: Additional detailed information about the status of - the application type. 
+ :param status_details: Additional detailed information about the status of the application + type. :type status_details: str - :param application_type_definition_kind: The mechanism used to define a - Service Fabric application type. Possible values include: 'Invalid', - 'ServiceFabricApplicationPackage', 'Compose' + :param application_type_definition_kind: The mechanism used to define a Service Fabric + application type. Possible values include: "Invalid", "ServiceFabricApplicationPackage", + "Compose". :type application_type_definition_kind: str or ~azure.servicefabric.models.ApplicationTypeDefinitionKind """ @@ -2449,7 +2896,17 @@ class ApplicationTypeInfo(Model): 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'}, } - def __init__(self, *, name: str=None, version: str=None, default_parameter_list=None, status=None, status_details: str=None, application_type_definition_kind=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + version: Optional[str] = None, + default_parameter_list: Optional[List["ApplicationParameter"]] = None, + status: Optional[Union[str, "ApplicationTypeStatus"]] = None, + status_details: Optional[str] = None, + application_type_definition_kind: Optional[Union[str, "ApplicationTypeDefinitionKind"]] = None, + **kwargs + ): super(ApplicationTypeInfo, self).__init__(**kwargs) self.name = name self.version = version @@ -2459,9 +2916,8 @@ def __init__(self, *, name: str=None, version: str=None, default_parameter_list= self.application_type_definition_kind = application_type_definition_kind -class ApplicationTypeManifest(Model): - """Contains the manifest describing an application type registered in a - Service Fabric cluster. +class ApplicationTypeManifest(msrest.serialization.Model): + """Contains the manifest describing an application type registered in a Service Fabric cluster. :param manifest: The XML manifest as a string. 
:type manifest: str @@ -2471,7 +2927,12 @@ class ApplicationTypeManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, *, manifest: str=None, **kwargs) -> None: + def __init__( + self, + *, + manifest: Optional[str] = None, + **kwargs + ): super(ApplicationTypeManifest, self).__init__(**kwargs) self.manifest = manifest @@ -2481,39 +2942,57 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + 
"ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
:type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str :param application_type_version: Required. Application type version. :type application_type_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time - in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -2521,94 +3000,89 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + application_type_version: str, + overall_upgrade_elapsed_time_in_ms: float, + category: Optional[str] = None, + 
has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationUpgradeCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationUpgradeCompleted' # type: str self.application_type_name = application_type_name self.application_type_version = application_type_version self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms - self.kind = 'ApplicationUpgradeCompleted' -class ApplicationUpgradeDescription(Model): - """Describes the parameters for an application upgrade. Note that upgrade - description replaces the existing application description. This means that - if the parameters are not specified, the existing parameters on the - applications will be overwritten with the empty parameters list. This would - result in the application using the default value of the parameters from - the application manifest. If you do not want to change any existing - parameter values, please get the application parameters first using the - GetApplicationInfo query and then supply those values as Parameters in this - ApplicationUpgradeDescription. +class ApplicationUpgradeDescription(msrest.serialization.Model): + """Describes the parameters for an application upgrade. Note that upgrade description replaces the existing application description. This means that if the parameters are not specified, the existing parameters on the applications will be overwritten with the empty parameters list. This would result in the application using the default value of the parameters from the application manifest. If you do not want to change any existing parameter values, please get the application parameters first using the GetApplicationInfo query and then supply those values as Parameters in this ApplicationUpgradeDescription. All required parameters must be populated in order to send to Azure. 
- :param name: Required. The name of the target application, including the - 'fabric:' URI scheme. + :param name: Required. The name of the target application, including the 'fabric:' URI scheme. :type name: str - :param target_application_type_version: Required. The target application - type version (found in the application manifest) for the application - upgrade. + :param target_application_type_version: Required. The target application type version (found in + the application manifest) for the application upgrade. :type target_application_type_version: str - :param parameters: List of application parameters with overridden values - from their default values specified in the application manifest. + :param parameters: List of application parameters with overridden values from their default + values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param upgrade_kind: Required. The kind of upgrade out of the following - possible values. Possible values include: 'Invalid', 'Rolling'. Default - value: "Rolling" . + :param upgrade_kind: Required. The kind of upgrade out of the following possible values. + Possible values include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through - the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', - 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default - value: "Default" . + :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible + values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", + "ReverseLexicographical". Default value: "Default". 
:type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param instance_close_delay_duration_in_seconds: Duration in seconds, to - wait before a stateless instance is closed, to allow the active requests - to drain gracefully. This would be effective when the instance is closing - during the application/cluster - upgrade, only for those instances which have a non-zero delay duration - configured in the service description. See - InstanceCloseDelayDurationSeconds property in $ref: + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a + stateless instance is closed, to allow the active requests to drain gracefully. This would be + effective when the instance is closing during the application/cluster + upgrade, only for those instances which have a non-zero delay duration configured in the + service description. See InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. 
- Note, the default value of InstanceCloseDelayDurationInSeconds is - 4294967295, which indicates that the behavior will entirely depend on the - delay configured in the stateless service description. + Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates + that the behavior will entirely depend on the delay configured in the stateless service + description. :type instance_close_delay_duration_in_seconds: long + :param managed_application_identity: Managed application identity description. + :type managed_application_identity: + ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ _validation = { @@ -2629,9 +3103,26 @@ class ApplicationUpgradeDescription(Model): 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, + 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__(self, *, name: str, target_application_type_version: str, parameters=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, sort_order="Default", monitoring_policy=None, application_health_policy=None, instance_close_delay_duration_in_seconds: int=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + target_application_type_version: str, + upgrade_kind: Union[str, "UpgradeKind"] = "Rolling", + parameters: Optional[List["ApplicationParameter"]] = None, + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, + force_restart: Optional[bool] = False, + sort_order: Optional[Union[str, "UpgradeSortOrder"]] = "Default", 
+ monitoring_policy: Optional["MonitoringPolicyDescription"] = None, + application_health_policy: Optional["ApplicationHealthPolicy"] = None, + instance_close_delay_duration_in_seconds: Optional[int] = 4294967295, + managed_application_identity: Optional["ManagedApplicationIdentityDescription"] = None, + **kwargs + ): super(ApplicationUpgradeDescription, self).__init__(**kwargs) self.name = name self.target_application_type_version = target_application_type_version @@ -2644,6 +3135,7 @@ def __init__(self, *, name: str, target_application_type_version: str, parameter self.monitoring_policy = monitoring_policy self.application_health_policy = application_health_policy self.instance_close_delay_duration_in_seconds = instance_close_delay_duration_in_seconds + self.managed_application_identity = managed_application_identity class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): @@ -2651,47 +3143,63 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent. Constant filled by server.
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application - type version. + :param current_application_type_version: Required. Current Application type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type - version. + :param application_type_version: Required. Target Application type version. :type application_type_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. 
:type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain - in milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain in milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -2702,11 +3210,11 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -2716,86 +3224,91 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, upgrade_state: str, upgrade_domains: str, upgrade_domain_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + current_application_type_version: str, + application_type_version: str, + upgrade_state: str, + upgrade_domains: str, 
+ upgrade_domain_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationUpgradeDomainCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationUpgradeDomainCompleted' # type: str self.application_type_name = application_type_name self.current_application_type_version = current_application_type_version self.application_type_version = application_type_version self.upgrade_state = upgrade_state self.upgrade_domains = upgrade_domains self.upgrade_domain_elapsed_time_in_ms = upgrade_domain_elapsed_time_in_ms - self.kind = 'ApplicationUpgradeDomainCompleted' -class ApplicationUpgradeProgressInfo(Model): +class ApplicationUpgradeProgressInfo(msrest.serialization.Model): """Describes the parameters for an application upgrade. - :param name: The name of the target application, including the 'fabric:' - URI scheme. + :param name: The name of the target application, including the 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. + :param type_name: The application type name as defined in the application manifest. :type type_name: str - :param target_application_type_version: The target application type - version (found in the application manifest) for the application upgrade. + :param target_application_type_version: The target application type version (found in the + application manifest) for the application upgrade. :type target_application_type_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. 
Possible values - include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', - 'RollingForwardPending', 'RollingForwardInProgress', - 'RollingForwardCompleted', 'Failed' + :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", + "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", + "RollingForwardInProgress", "RollingForwardCompleted", "Failed". :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be - processed. + :param next_upgrade_domain: The name of the next upgrade domain to be processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Describes the parameters for an application - upgrade. Note that upgrade description replaces the existing application - description. This means that if the parameters are not specified, the - existing parameters on the applications will be overwritten with the empty - parameters list. This would result in the application using the default - value of the parameters from the application manifest. If you do not want - to change any existing parameter values, please get the application - parameters first using the GetApplicationInfo query and then supply those - values as Parameters in this ApplicationUpgradeDescription. 
- :type upgrade_description: - ~azure.servicefabric.models.ApplicationUpgradeDescription - :param upgrade_duration_in_milliseconds: The estimated total amount of - time spent processing the overall upgrade. + :param upgrade_description: Describes the parameters for an application upgrade. Note that + upgrade description replaces the existing application description. This means that if the + parameters are not specified, the existing parameters on the applications will be overwritten + with the empty parameters list. This would result in the application using the default value of + the parameters from the application manifest. If you do not want to change any existing + parameter values, please get the application parameters first using the GetApplicationInfo + query and then supply those values as Parameters in this ApplicationUpgradeDescription. + :type upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription + :param upgrade_duration_in_milliseconds: The estimated total amount of time spent processing + the overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated total amount - of time spent processing the current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated total amount of time spent + processing the current upgrade domain. :type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in - the current aggregated health state. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current - in-progress upgrade domain. + :param unhealthy_evaluations: List of health evaluations that resulted in the current + aggregated health state. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current in-progress upgrade + domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade - started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade - failed and FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and + FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in - FailureAction being executed. Possible values include: 'None', - 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', - 'OverallUpgradeTimeout' + :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being + executed. Possible values include: "None", "Interrupted", "HealthCheck", + "UpgradeDomainTimeout", "OverallUpgradeTimeout". :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade - domain progress at the time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the + time of upgrade failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param upgrade_status_details: Additional detailed information about the - status of the pending upgrade. + :param upgrade_status_details: Additional detailed information about the status of the pending + upgrade. 
:type upgrade_status_details: str """ @@ -2819,7 +3332,28 @@ class ApplicationUpgradeProgressInfo(Model): 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, } - def __init__(self, *, name: str=None, type_name: str=None, target_application_type_version: str=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain: str=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds: str=None, upgrade_domain_duration_in_milliseconds: str=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, upgrade_status_details: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + type_name: Optional[str] = None, + target_application_type_version: Optional[str] = None, + upgrade_domains: Optional[List["UpgradeDomainInfo"]] = None, + upgrade_state: Optional[Union[str, "UpgradeState"]] = None, + next_upgrade_domain: Optional[str] = None, + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + upgrade_description: Optional["ApplicationUpgradeDescription"] = None, + upgrade_duration_in_milliseconds: Optional[str] = None, + upgrade_domain_duration_in_milliseconds: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + current_upgrade_domain_progress: Optional["CurrentUpgradeDomainProgressInfo"] = None, + start_timestamp_utc: Optional[str] = None, + failure_timestamp_utc: Optional[str] = None, + failure_reason: Optional[Union[str, "FailureReason"]] = None, + upgrade_domain_progress_at_failure: Optional["FailureUpgradeDomainProgressInfo"] = None, + upgrade_status_details: Optional[str] = None, + **kwargs + ): super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs) self.name = name self.type_name = type_name @@ -2845,25 +3379,44 @@ class 
ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", 
"ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str @@ -2871,15 +3424,14 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): :type application_type_version: str :param failure_reason: Required. Describes reason of failure. 
:type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time - in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -2888,11 +3440,11 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, @@ -2900,13 +3452,26 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + application_type_version: str, + failure_reason: str, + overall_upgrade_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + 
): super(ApplicationUpgradeRollbackCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationUpgradeRollbackCompleted' # type: str self.application_type_name = application_type_name self.application_type_version = application_type_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms - self.kind = 'ApplicationUpgradeRollbackCompleted' class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): @@ -2914,45 +3479,61 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. 
This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application - type version. + :param current_application_type_version: Required. Current Application type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type - version. + :param application_type_version: Required. Target Application type version. :type application_type_version: str :param failure_reason: Required. Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time - in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -2962,11 +3543,11 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -2975,14 +3556,28 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + current_application_type_version: str, + application_type_version: str, + failure_reason: str, + overall_upgrade_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationUpgradeRollbackStartedEvent, self).__init__(event_instance_id=event_instance_id, 
category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationUpgradeRollbackStarted' # type: str self.application_type_name = application_type_name self.current_application_type_version = current_application_type_version self.application_type_version = application_type_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms - self.kind = 'ApplicationUpgradeRollbackStarted' class ApplicationUpgradeStartedEvent(ApplicationEvent): @@ -2990,33 +3585,50 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + 
"DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application - type version. + :param current_application_type_version: Required. Current Application type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type - version. + :param application_type_version: Required. Target Application type version. :type application_type_version: str :param upgrade_type: Required. Type of upgrade. :type upgrade_type: str @@ -3027,9 +3639,9 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3040,11 +3652,11 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3054,37 +3666,48 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): 'failure_action': {'key': 'FailureAction', 
'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, upgrade_type: str, rolling_upgrade_mode: str, failure_action: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_type_name: str, + current_application_type_version: str, + application_type_version: str, + upgrade_type: str, + rolling_upgrade_mode: str, + failure_action: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ApplicationUpgradeStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'ApplicationUpgradeStarted' # type: str self.application_type_name = application_type_name self.current_application_type_version = current_application_type_version self.application_type_version = application_type_version self.upgrade_type = upgrade_type self.rolling_upgrade_mode = rolling_upgrade_mode self.failure_action = failure_action - self.kind = 'ApplicationUpgradeStarted' -class ApplicationUpgradeUpdateDescription(Model): +class ApplicationUpgradeUpdateDescription(msrest.serialization.Model): """Describes the parameters for updating an ongoing application upgrade. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the - 'fabric:' URI scheme. + :param name: Required. The name of the application, including the 'fabric:' URI scheme. :type name: str - :param upgrade_kind: Required. The kind of upgrade out of the following - possible values. Possible values include: 'Invalid', 'Rolling'. Default - value: "Rolling" . 
+ :param upgrade_kind: Required. The kind of upgrade out of the following possible values. + Possible values include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param update_description: Describes the parameters for updating a rolling - upgrade of application or cluster. - :type update_description: - ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param update_description: Describes the parameters for updating a rolling upgrade of + application or cluster. 
+ :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription """ _validation = { @@ -3099,7 +3722,15 @@ class ApplicationUpgradeUpdateDescription(Model): 'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'}, } - def __init__(self, *, name: str, upgrade_kind="Rolling", application_health_policy=None, update_description=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + upgrade_kind: Union[str, "UpgradeKind"] = "Rolling", + application_health_policy: Optional["ApplicationHealthPolicy"] = None, + update_description: Optional["RollingUpgradeUpdateDescription"] = None, + **kwargs + ): super(ApplicationUpgradeUpdateDescription, self).__init__(**kwargs) self.name = name self.upgrade_kind = upgrade_kind @@ -3107,17 +3738,17 @@ def __init__(self, *, name: str, upgrade_kind="Rolling", application_health_poli self.update_description = update_description -class AutoScalingMetric(Model): - """Describes the metric that is used for triggering auto scaling operation. - Derived classes will describe resources or metrics. +class AutoScalingMetric(msrest.serialization.Model): + """Describes the metric that is used for triggering auto scaling operation. Derived classes will describe resources or metrics. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AutoScalingResourceMetric + sub-classes are: AutoScalingResourceMetric. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible + values include: "Resource". 
+ :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind """ _validation = { @@ -3132,23 +3763,25 @@ class AutoScalingMetric(Model): 'kind': {'Resource': 'AutoScalingResourceMetric'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(AutoScalingMetric, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] -class AutoScalingPolicy(Model): +class AutoScalingPolicy(msrest.serialization.Model): """Describes the auto scaling policy. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the auto scaling policy. :type name: str - :param trigger: Required. Determines when auto scaling operation will be - invoked. + :param trigger: Required. Determines when auto scaling operation will be invoked. :type trigger: ~azure.servicefabric.models.AutoScalingTrigger - :param mechanism: Required. The mechanism that is used to scale when auto - scaling operation is invoked. + :param mechanism: Required. The mechanism that is used to scale when auto scaling operation is + invoked. :type mechanism: ~azure.servicefabric.models.AutoScalingMechanism """ @@ -3164,7 +3797,14 @@ class AutoScalingPolicy(Model): 'mechanism': {'key': 'mechanism', 'type': 'AutoScalingMechanism'}, } - def __init__(self, *, name: str, trigger, mechanism, **kwargs) -> None: + def __init__( + self, + *, + name: str, + trigger: "AutoScalingTrigger", + mechanism: "AutoScalingMechanism", + **kwargs + ): super(AutoScalingPolicy, self).__init__(**kwargs) self.name = name self.trigger = trigger @@ -3176,12 +3816,11 @@ class AutoScalingResourceMetric(AutoScalingMetric): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param name: Required. Name of the resource. 
Possible values include: - 'cpu', 'memoryInGB' - :type name: str or - ~azure.servicefabric.models.AutoScalingResourceMetricName + :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible + values include: "Resource". + :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind + :param name: Required. Name of the resource. Possible values include: "cpu", "memoryInGB". + :type name: str or ~azure.servicefabric.models.AutoScalingResourceMetricName """ _validation = { @@ -3194,22 +3833,28 @@ class AutoScalingResourceMetric(AutoScalingMetric): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, *, name, **kwargs) -> None: + def __init__( + self, + *, + name: Union[str, "AutoScalingResourceMetricName"], + **kwargs + ): super(AutoScalingResourceMetric, self).__init__(**kwargs) + self.kind = 'Resource' # type: str self.name = name - self.kind = 'Resource' -class AutoScalingTrigger(Model): +class AutoScalingTrigger(msrest.serialization.Model): """Describes the trigger for performing auto scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AverageLoadScalingTrigger + sub-classes are: AverageLoadScalingTrigger. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible + values include: "AverageLoad". 
+ :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind """ _validation = { @@ -3224,9 +3869,12 @@ class AutoScalingTrigger(Model): 'kind': {'AverageLoad': 'AverageLoadScalingTrigger'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(AutoScalingTrigger, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AverageLoadScalingTrigger(AutoScalingTrigger): @@ -3234,19 +3882,19 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param metric: Required. Description of the metric that is used for - scaling. + :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible + values include: "AverageLoad". + :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind + :param metric: Required. Description of the metric that is used for scaling. :type metric: ~azure.servicefabric.models.AutoScalingMetric - :param lower_load_threshold: Required. Lower load threshold (if average - load is below this threshold, service will scale down). + :param lower_load_threshold: Required. Lower load threshold (if average load is below this + threshold, service will scale down). :type lower_load_threshold: float - :param upper_load_threshold: Required. Upper load threshold (if average - load is above this threshold, service will scale up). + :param upper_load_threshold: Required. Upper load threshold (if average load is above this + threshold, service will scale up). :type upper_load_threshold: float - :param scale_interval_in_seconds: Required. Scale interval that indicates - how often will this trigger be checked. + :param scale_interval_in_seconds: Required. Scale interval that indicates how often will this + trigger be checked. 
:type scale_interval_in_seconds: int """ @@ -3266,26 +3914,34 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): 'scale_interval_in_seconds': {'key': 'scaleIntervalInSeconds', 'type': 'int'}, } - def __init__(self, *, metric, lower_load_threshold: float, upper_load_threshold: float, scale_interval_in_seconds: int, **kwargs) -> None: + def __init__( + self, + *, + metric: "AutoScalingMetric", + lower_load_threshold: float, + upper_load_threshold: float, + scale_interval_in_seconds: int, + **kwargs + ): super(AverageLoadScalingTrigger, self).__init__(**kwargs) + self.kind = 'AverageLoad' # type: str self.metric = metric self.lower_load_threshold = lower_load_threshold self.upper_load_threshold = upper_load_threshold self.scale_interval_in_seconds = scale_interval_in_seconds - self.kind = 'AverageLoad' -class ScalingTriggerDescription(Model): +class ScalingTriggerDescription(msrest.serialization.Model): """Describes the trigger for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AveragePartitionLoadScalingTrigger, - AverageServiceLoadScalingTrigger + sub-classes are: AveragePartitionLoadScalingTrigger, AverageServiceLoadScalingTrigger. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. + Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". 
+ :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind """ _validation = { @@ -3300,30 +3956,32 @@ class ScalingTriggerDescription(Model): 'kind': {'AveragePartitionLoad': 'AveragePartitionLoadScalingTrigger', 'AverageServiceLoad': 'AverageServiceLoadScalingTrigger'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ScalingTriggerDescription, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling trigger related to an average load of a - metric/resource of a partition. + """Represents a scaling trigger related to an average load of a metric/resource of a partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param metric_name: Required. The name of the metric for which usage - should be tracked. + :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. + Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". + :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind + :param metric_name: Required. The name of the metric for which usage should be tracked. :type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below - which a scale in operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below which a scale in + operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond - which a scale out operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out + operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. 
The period in seconds on which - a decision is made whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made + whether to scale or not. :type scale_interval_in_seconds: long """ @@ -3343,35 +4001,49 @@ class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, } - def __init__(self, *, metric_name: str, lower_load_threshold: str, upper_load_threshold: str, scale_interval_in_seconds: int, **kwargs) -> None: + def __init__( + self, + *, + metric_name: str, + lower_load_threshold: str, + upper_load_threshold: str, + scale_interval_in_seconds: int, + **kwargs + ): super(AveragePartitionLoadScalingTrigger, self).__init__(**kwargs) + self.kind = 'AveragePartitionLoad' # type: str self.metric_name = metric_name self.lower_load_threshold = lower_load_threshold self.upper_load_threshold = upper_load_threshold self.scale_interval_in_seconds = scale_interval_in_seconds - self.kind = 'AveragePartitionLoad' class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling policy related to an average load of a metric/resource - of a service. + """Represents a scaling policy related to an average load of a metric/resource of a service. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param metric_name: Required. The name of the metric for which usage - should be tracked. + :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. + Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". + :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind + :param metric_name: Required. The name of the metric for which usage should be tracked. :type metric_name: str - :param lower_load_threshold: Required. 
The lower limit of the load below - which a scale in operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below which a scale in + operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond - which a scale out operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out + operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. The period in seconds on which - a decision is made whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made + whether to scale or not. :type scale_interval_in_seconds: long + :param use_only_primary_load: Required. Flag determines whether only the load of primary + replica should be considered for scaling. + If set to true, then trigger will only consider the load of primary replicas of stateful + service. + If set to false, trigger will consider load of all replicas. + This parameter cannot be set to true for stateless service. 
+ :type use_only_primary_load: bool """ _validation = { @@ -3380,6 +4052,7 @@ class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): 'lower_load_threshold': {'required': True}, 'upper_load_threshold': {'required': True}, 'scale_interval_in_seconds': {'required': True, 'maximum': 4294967295, 'minimum': 0}, + 'use_only_primary_load': {'required': True}, } _attribute_map = { @@ -3388,30 +4061,42 @@ class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): 'lower_load_threshold': {'key': 'LowerLoadThreshold', 'type': 'str'}, 'upper_load_threshold': {'key': 'UpperLoadThreshold', 'type': 'str'}, 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, - } - - def __init__(self, *, metric_name: str, lower_load_threshold: str, upper_load_threshold: str, scale_interval_in_seconds: int, **kwargs) -> None: + 'use_only_primary_load': {'key': 'UseOnlyPrimaryLoad', 'type': 'bool'}, + } + + def __init__( + self, + *, + metric_name: str, + lower_load_threshold: str, + upper_load_threshold: str, + scale_interval_in_seconds: int, + use_only_primary_load: bool, + **kwargs + ): super(AverageServiceLoadScalingTrigger, self).__init__(**kwargs) + self.kind = 'AverageServiceLoad' # type: str self.metric_name = metric_name self.lower_load_threshold = lower_load_threshold self.upper_load_threshold = upper_load_threshold self.scale_interval_in_seconds = scale_interval_in_seconds - self.kind = 'AverageServiceLoad' + self.use_only_primary_load = use_only_primary_load -class BackupStorageDescription(Model): +class BackupStorageDescription(msrest.serialization.Model): """Describes the parameters for the backup storage. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: AzureBlobBackupStorageDescription, - FileShareBackupStorageDescription, DsmsAzureBlobBackupStorageDescription + sub-classes are: AzureBlobBackupStorageDescription, DsmsAzureBlobBackupStorageDescription, FileShareBackupStorageDescription, ManagedIdentityAzureBlobBackupStorageDescription. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str """ _validation = { @@ -3419,35 +4104,40 @@ class BackupStorageDescription(Model): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, } _subtype_map = { - 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription'} + 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'ManagedIdentityAzureBlobStore': 'ManagedIdentityAzureBlobBackupStorageDescription'} } - def __init__(self, *, friendly_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + friendly_name: Optional[str] = None, + **kwargs + ): super(BackupStorageDescription, self).__init__(**kwargs) + self.storage_kind = None # type: Optional[str] self.friendly_name = friendly_name - self.storage_kind = None class 
AzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Azure blob store used for storing and - enumerating backups. + """Describes the parameters for Azure blob store used for storing and enumerating backups. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str - :param connection_string: Required. The connection string to connect to - the Azure blob store. + :param connection_string: Required. The connection string to connect to the Azure blob store. :type connection_string: str - :param container_name: Required. The name of the container in the blob - store to store and enumerate backups from. + :param container_name: Required. The name of the container in the blob store to store and + enumerate backups from. 
:type container_name: str """ @@ -3458,34 +4148,41 @@ class AzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'connection_string': {'key': 'ConnectionString', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__(self, *, connection_string: str, container_name: str, friendly_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + connection_string: str, + container_name: str, + friendly_name: Optional[str] = None, + **kwargs + ): super(AzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) + self.storage_kind = 'AzureBlobStore' # type: str self.connection_string = connection_string self.container_name = container_name - self.storage_kind = 'AzureBlobStore' -class DiagnosticsSinkProperties(Model): +class DiagnosticsSinkProperties(msrest.serialization.Model): """Properties of a DiagnosticsSink. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AzureInternalMonitoringPipelineSinkDescription + sub-classes are: AzureInternalMonitoringPipelineSinkDescription. All required parameters must be populated in order to send to Azure. - :param name: Name of the sink. This value is referenced by - DiagnosticsReferenceDescription + :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values + include: "Invalid", "AzureInternalMonitoringPipeline". + :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind + :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. :type name: str :param description: A description of the sink. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { @@ -3493,20 +4190,26 @@ class DiagnosticsSinkProperties(Model): } _attribute_map = { + 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, - 'kind': {'key': 'kind', 'type': 'str'}, } _subtype_map = { 'kind': {'AzureInternalMonitoringPipeline': 'AzureInternalMonitoringPipelineSinkDescription'} } - def __init__(self, *, name: str=None, description: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + **kwargs + ): super(DiagnosticsSinkProperties, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.name = name self.description = description - self.kind = None class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): @@ -3514,24 +4217,23 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): All required parameters must be populated in order to send to Azure. - :param name: Name of the sink. This value is referenced by - DiagnosticsReferenceDescription + :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values + include: "Invalid", "AzureInternalMonitoringPipeline". + :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind + :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. :type name: str :param description: A description of the sink. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param account_name: Azure Internal monitoring pipeline account. :type account_name: str :param namespace: Azure Internal monitoring pipeline account namespace. :type namespace: str :param ma_config_url: Azure Internal monitoring agent configuration. :type ma_config_url: str - :param fluentd_config_url: Azure Internal monitoring agent fluentd - configuration. 
+ :param fluentd_config_url: Azure Internal monitoring agent fluentd configuration. :type fluentd_config_url: str - :param auto_key_config_url: Azure Internal monitoring pipeline autokey - associated with the certificate. + :param auto_key_config_url: Azure Internal monitoring pipeline autokey associated with the + certificate. :type auto_key_config_url: str """ @@ -3540,9 +4242,9 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): } _attribute_map = { + 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, - 'kind': {'key': 'kind', 'type': 'str'}, 'account_name': {'key': 'accountName', 'type': 'str'}, 'namespace': {'key': 'namespace', 'type': 'str'}, 'ma_config_url': {'key': 'maConfigUrl', 'type': 'str'}, @@ -3550,53 +4252,57 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): 'auto_key_config_url': {'key': 'autoKeyConfigUrl', 'type': 'str'}, } - def __init__(self, *, name: str=None, description: str=None, account_name: str=None, namespace: str=None, ma_config_url: str=None, fluentd_config_url: str=None, auto_key_config_url: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + account_name: Optional[str] = None, + namespace: Optional[str] = None, + ma_config_url: Optional[str] = None, + fluentd_config_url: Optional[str] = None, + auto_key_config_url: Optional[str] = None, + **kwargs + ): super(AzureInternalMonitoringPipelineSinkDescription, self).__init__(name=name, description=description, **kwargs) + self.kind = 'AzureInternalMonitoringPipeline' # type: str self.account_name = account_name self.namespace = namespace self.ma_config_url = ma_config_url self.fluentd_config_url = fluentd_config_url self.auto_key_config_url = auto_key_config_url - self.kind = 'AzureInternalMonitoringPipeline' -class BackupInfo(Model): +class 
BackupInfo(msrest.serialization.Model): """Represents a backup point which can be used to trigger a restore. :param backup_id: Unique backup ID . :type backup_id: str - :param backup_chain_id: Unique backup chain ID. All backups part of the - same chain has the same backup chain id. A backup chain is comprised of 1 - full backup and multiple incremental backups. + :param backup_chain_id: Unique backup chain ID. All backups part of the same chain has the same + backup chain id. A backup chain is comprised of 1 full backup and multiple incremental backups. :type backup_chain_id: str - :param application_name: Name of the Service Fabric application this - partition backup belongs to. + :param application_name: Name of the Service Fabric application this partition backup belongs + to. :type application_name: str - :param service_name: Name of the Service Fabric service this partition - backup belongs to. + :param service_name: Name of the Service Fabric service this partition backup belongs to. :type service_name: str - :param partition_information: Information about the partition to which - this backup belongs to - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param backup_location: Location of the backup, relative to the backup - store. + :param partition_information: Information about the partition to which this backup belongs to. + :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param backup_location: Location of the backup, relative to the backup store. :type backup_location: str - :param backup_type: Describes the type of backup, whether its full or - incremental. Possible values include: 'Invalid', 'Full', 'Incremental' + :param backup_type: Describes the type of backup, whether its full or incremental. Possible + values include: "Invalid", "Full", "Incremental". 
:type backup_type: str or ~azure.servicefabric.models.BackupType - :param epoch_of_last_backup_record: Epoch of the last record in this - backup. + :param epoch_of_last_backup_record: Epoch of the last record in this backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch :param lsn_of_last_backup_record: LSN of the last record in this backup. :type lsn_of_last_backup_record: str :param creation_time_utc: The date time when this backup was taken. - :type creation_time_utc: datetime - :param service_manifest_version: Manifest Version of the service this - partition backup belongs to. + :type creation_time_utc: ~datetime.datetime + :param service_manifest_version: Manifest Version of the service this partition backup belongs + to. :type service_manifest_version: str - :param failure_error: Denotes the failure encountered in getting backup - point information. + :param failure_error: Denotes the failure encountered in getting backup point information. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -3615,7 +4321,23 @@ class BackupInfo(Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__(self, *, backup_id: str=None, backup_chain_id: str=None, application_name: str=None, service_name: str=None, partition_information=None, backup_location: str=None, backup_type=None, epoch_of_last_backup_record=None, lsn_of_last_backup_record: str=None, creation_time_utc=None, service_manifest_version: str=None, failure_error=None, **kwargs) -> None: + def __init__( + self, + *, + backup_id: Optional[str] = None, + backup_chain_id: Optional[str] = None, + application_name: Optional[str] = None, + service_name: Optional[str] = None, + partition_information: Optional["PartitionInformation"] = None, + backup_location: Optional[str] = None, + backup_type: Optional[Union[str, "BackupType"]] = None, + epoch_of_last_backup_record: Optional["Epoch"] = None, + lsn_of_last_backup_record: Optional[str] = None, 
+ creation_time_utc: Optional[datetime.datetime] = None, + service_manifest_version: Optional[str] = None, + failure_error: Optional["FabricErrorError"] = None, + **kwargs + ): super(BackupInfo, self).__init__(**kwargs) self.backup_id = backup_id self.backup_chain_id = backup_chain_id @@ -3631,11 +4353,10 @@ def __init__(self, *, backup_id: str=None, backup_chain_id: str=None, applicatio self.failure_error = failure_error -class BackupPartitionDescription(Model): +class BackupPartitionDescription(msrest.serialization.Model): """Describes the parameters for triggering partition's backup. - :param backup_storage: Specifies the details of the backup storage where - to save the backup. + :param backup_storage: Specifies the details of the backup storage where to save the backup. :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -3643,39 +4364,42 @@ class BackupPartitionDescription(Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__(self, *, backup_storage=None, **kwargs) -> None: + def __init__( + self, + *, + backup_storage: Optional["BackupStorageDescription"] = None, + **kwargs + ): super(BackupPartitionDescription, self).__init__(**kwargs) self.backup_storage = backup_storage -class BackupPolicyDescription(Model): +class BackupPolicyDescription(msrest.serialization.Model): """Describes a backup policy for configuring periodic backup. All required parameters must be populated in order to send to Azure. :param name: Required. The unique name identifying this backup policy. :type name: str - :param auto_restore_on_data_loss: Required. Specifies whether to trigger - restore automatically using the latest available backup in case the - partition experiences a data loss event. + :param auto_restore_on_data_loss: Required. Specifies whether to trigger restore automatically + using the latest available backup in case the partition experiences a data loss event. 
:type auto_restore_on_data_loss: bool - :param max_incremental_backups: Required. Defines the maximum number of - incremental backups to be taken between two full backups. This is just the - upper limit. A full backup may be taken before specified number of - incremental backups are completed in one of the following conditions - - The replica has never taken a full backup since it has become primary, - - Some of the log records since the last backup has been truncated, or - - Replica passed the MaxAccumulatedBackupLogSizeInMB limit. + :param max_incremental_backups: Required. Defines the maximum number of incremental backups to + be taken between two full backups. This is just the upper limit. A full backup may be taken + before specified number of incremental backups are completed in one of the following conditions + + + * The replica has never taken a full backup since it has become primary, + * Some of the log records since the last backup has been truncated, or + * Replica passed the MaxAccumulatedBackupLogSizeInMB limit. :type max_incremental_backups: int :param schedule: Required. Describes the backup schedule parameters. :type schedule: ~azure.servicefabric.models.BackupScheduleDescription - :param storage: Required. Describes the details of backup storage where to - store the periodic backups. + :param storage: Required. Describes the details of backup storage where to store the periodic + backups. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param retention_policy: Describes the policy to retain backups in - storage. - :type retention_policy: - ~azure.servicefabric.models.RetentionPolicyDescription + :param retention_policy: Describes the policy to retain backups in storage. 
+ :type retention_policy: ~azure.servicefabric.models.RetentionPolicyDescription """ _validation = { @@ -3695,7 +4419,17 @@ class BackupPolicyDescription(Model): 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicyDescription'}, } - def __init__(self, *, name: str, auto_restore_on_data_loss: bool, max_incremental_backups: int, schedule, storage, retention_policy=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + auto_restore_on_data_loss: bool, + max_incremental_backups: int, + schedule: "BackupScheduleDescription", + storage: "BackupStorageDescription", + retention_policy: Optional["RetentionPolicyDescription"] = None, + **kwargs + ): super(BackupPolicyDescription, self).__init__(**kwargs) self.name = name self.auto_restore_on_data_loss = auto_restore_on_data_loss @@ -3705,29 +4439,23 @@ def __init__(self, *, name: str, auto_restore_on_data_loss: bool, max_incrementa self.retention_policy = retention_policy -class BackupProgressInfo(Model): +class BackupProgressInfo(msrest.serialization.Model): """Describes the progress of a partition's backup. - :param backup_state: Represents the current state of the partition backup - operation. Possible values include: 'Invalid', 'Accepted', - 'BackupInProgress', 'Success', 'Failure', 'Timeout' + :param backup_state: Represents the current state of the partition backup operation. Possible + values include: "Invalid", "Accepted", "BackupInProgress", "Success", "Failure", "Timeout". :type backup_state: str or ~azure.servicefabric.models.BackupState - :param time_stamp_utc: TimeStamp in UTC when operation succeeded or - failed. - :type time_stamp_utc: datetime + :param time_stamp_utc: TimeStamp in UTC when operation succeeded or failed. + :type time_stamp_utc: ~datetime.datetime :param backup_id: Unique ID of the newly created backup. :type backup_id: str - :param backup_location: Location, relative to the backup store, of the - newly created backup. 
+ :param backup_location: Location, relative to the backup store, of the newly created backup. :type backup_location: str - :param epoch_of_last_backup_record: Specifies the epoch of the last record - included in backup. + :param epoch_of_last_backup_record: Specifies the epoch of the last record included in backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch - :param lsn_of_last_backup_record: The LSN of last record included in - backup. + :param lsn_of_last_backup_record: The LSN of last record included in backup. :type lsn_of_last_backup_record: str - :param failure_error: Denotes the failure encountered in performing backup - operation. + :param failure_error: Denotes the failure encountered in performing backup operation. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -3741,7 +4469,18 @@ class BackupProgressInfo(Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__(self, *, backup_state=None, time_stamp_utc=None, backup_id: str=None, backup_location: str=None, epoch_of_last_backup_record=None, lsn_of_last_backup_record: str=None, failure_error=None, **kwargs) -> None: + def __init__( + self, + *, + backup_state: Optional[Union[str, "BackupState"]] = None, + time_stamp_utc: Optional[datetime.datetime] = None, + backup_id: Optional[str] = None, + backup_location: Optional[str] = None, + epoch_of_last_backup_record: Optional["Epoch"] = None, + lsn_of_last_backup_record: Optional[str] = None, + failure_error: Optional["FabricErrorError"] = None, + **kwargs + ): super(BackupProgressInfo, self).__init__(**kwargs) self.backup_state = backup_state self.time_stamp_utc = time_stamp_utc @@ -3752,17 +4491,18 @@ def __init__(self, *, backup_state=None, time_stamp_utc=None, backup_id: str=Non self.failure_error = failure_error -class BackupScheduleDescription(Model): +class BackupScheduleDescription(msrest.serialization.Model): """Describes the backup schedule parameters. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FrequencyBasedBackupScheduleDescription, - TimeBasedBackupScheduleDescription + sub-classes are: FrequencyBasedBackupScheduleDescription, TimeBasedBackupScheduleDescription. All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. Constant filled by server. - :type schedule_kind: str + :param schedule_kind: Required. The kind of backup schedule, time based or frequency + based.Constant filled by server. Possible values include: "Invalid", "TimeBased", + "FrequencyBased". + :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind """ _validation = { @@ -3777,22 +4517,22 @@ class BackupScheduleDescription(Model): 'schedule_kind': {'FrequencyBased': 'FrequencyBasedBackupScheduleDescription', 'TimeBased': 'TimeBasedBackupScheduleDescription'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(BackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = None + self.schedule_kind = None # type: Optional[str] -class BackupSuspensionInfo(Model): +class BackupSuspensionInfo(msrest.serialization.Model): """Describes the backup suspension details. - :param is_suspended: Indicates whether periodic backup is suspended at - this level or not. + :param is_suspended: Indicates whether periodic backup is suspended at this level or not. :type is_suspended: bool - :param suspension_inherited_from: Specifies the scope at which the backup - suspension was applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type suspension_inherited_from: str or - ~azure.servicefabric.models.BackupSuspensionScope + :param suspension_inherited_from: Specifies the scope at which the backup suspension was + applied. Possible values include: "Invalid", "Partition", "Service", "Application". 
+ :type suspension_inherited_from: str or ~azure.servicefabric.models.BackupSuspensionScope """ _attribute_map = { @@ -3800,22 +4540,30 @@ class BackupSuspensionInfo(Model): 'suspension_inherited_from': {'key': 'SuspensionInheritedFrom', 'type': 'str'}, } - def __init__(self, *, is_suspended: bool=None, suspension_inherited_from=None, **kwargs) -> None: + def __init__( + self, + *, + is_suspended: Optional[bool] = None, + suspension_inherited_from: Optional[Union[str, "BackupSuspensionScope"]] = None, + **kwargs + ): super(BackupSuspensionInfo, self).__init__(**kwargs) self.is_suspended = is_suspended self.suspension_inherited_from = suspension_inherited_from -class RetentionPolicyDescription(Model): +class RetentionPolicyDescription(msrest.serialization.Model): """Describes the retention policy configured. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BasicRetentionPolicyDescription + sub-classes are: BasicRetentionPolicyDescription. All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. Constant filled by server. - :type retention_policy_type: str + :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" + retention policy is supported.Constant filled by server. Possible values include: "Basic", + "Invalid". 
+ :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType """ _validation = { @@ -3830,9 +4578,12 @@ class RetentionPolicyDescription(Model): 'retention_policy_type': {'Basic': 'BasicRetentionPolicyDescription'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(RetentionPolicyDescription, self).__init__(**kwargs) - self.retention_policy_type = None + self.retention_policy_type = None # type: Optional[str] class BasicRetentionPolicyDescription(RetentionPolicyDescription): @@ -3840,16 +4591,17 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. Constant filled by server. - :type retention_policy_type: str - :param retention_duration: Required. It is the minimum duration for which - a backup created, will remain stored in the storage and might get deleted - after that span of time. It should be specified in ISO8601 format. - :type retention_duration: timedelta - :param minimum_number_of_backups: It is the minimum number of backups to - be retained at any point of time. If specified with a non zero value, - backups will not be deleted even if the backups have gone past retention - duration and have number of backups less than or equal to it. + :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" + retention policy is supported.Constant filled by server. Possible values include: "Basic", + "Invalid". + :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType + :param retention_duration: Required. It is the minimum duration for which a backup created, + will remain stored in the storage and might get deleted after that span of time. It should be + specified in ISO8601 format. 
+ :type retention_duration: ~datetime.timedelta + :param minimum_number_of_backups: It is the minimum number of backups to be retained at any + point of time. If specified with a non zero value, backups will not be deleted even if the + backups have gone past retention duration and have number of backups less than or equal to it. :type minimum_number_of_backups: int """ @@ -3865,24 +4617,31 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): 'minimum_number_of_backups': {'key': 'MinimumNumberOfBackups', 'type': 'int'}, } - def __init__(self, *, retention_duration, minimum_number_of_backups: int=None, **kwargs) -> None: + def __init__( + self, + *, + retention_duration: datetime.timedelta, + minimum_number_of_backups: Optional[int] = None, + **kwargs + ): super(BasicRetentionPolicyDescription, self).__init__(**kwargs) + self.retention_policy_type = 'Basic' # type: str self.retention_duration = retention_duration self.minimum_number_of_backups = minimum_number_of_backups - self.retention_policy_type = 'Basic' -class PropertyValue(Model): +class PropertyValue(msrest.serialization.Model): """Describes a Service Fabric property value. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BinaryPropertyValue, Int64PropertyValue, - DoublePropertyValue, StringPropertyValue, GuidPropertyValue + sub-classes are: BinaryPropertyValue, DoublePropertyValue, GuidPropertyValue, Int64PropertyValue, StringPropertyValue. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". 
+ :type kind: str or ~azure.servicefabric.models.PropertyValueKind """ _validation = { @@ -3894,12 +4653,15 @@ class PropertyValue(Model): } _subtype_map = { - 'kind': {'Binary': 'BinaryPropertyValue', 'Int64': 'Int64PropertyValue', 'Double': 'DoublePropertyValue', 'String': 'StringPropertyValue', 'Guid': 'GuidPropertyValue'} + 'kind': {'Binary': 'BinaryPropertyValue', 'Double': 'DoublePropertyValue', 'Guid': 'GuidPropertyValue', 'Int64': 'Int64PropertyValue', 'String': 'StringPropertyValue'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(PropertyValue, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class BinaryPropertyValue(PropertyValue): @@ -3907,10 +4669,12 @@ class BinaryPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param data: Required. Array of bytes to be sent as an integer array. Each - element of array is a number between 0 and 255. + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param data: Required. Array of bytes to be sent as an integer array. Each element of array is + a number between 0 and 255. :type data: list[int] """ @@ -3924,25 +4688,28 @@ class BinaryPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': '[int]'}, } - def __init__(self, *, data, **kwargs) -> None: + def __init__( + self, + *, + data: List[int], + **kwargs + ): super(BinaryPropertyValue, self).__init__(**kwargs) + self.kind = 'Binary' # type: str self.data = data - self.kind = 'Binary' -class Chaos(Model): +class Chaos(msrest.serialization.Model): """Contains a description of Chaos. 
- :param chaos_parameters: If Chaos is running, these are the parameters - Chaos is running with. + :param chaos_parameters: If Chaos is running, these are the parameters Chaos is running with. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param status: Current status of the Chaos run. Possible values include: - 'Invalid', 'Running', 'Stopped' + :param status: Current status of the Chaos run. Possible values include: "Invalid", "Running", + "Stopped". :type status: str or ~azure.servicefabric.models.ChaosStatus - :param schedule_status: Current status of the schedule. Possible values - include: 'Invalid', 'Stopped', 'Active', 'Expired', 'Pending' - :type schedule_status: str or - ~azure.servicefabric.models.ChaosScheduleStatus + :param schedule_status: Current status of the schedule. Possible values include: "Invalid", + "Stopped", "Active", "Expired", "Pending". + :type schedule_status: str or ~azure.servicefabric.models.ChaosScheduleStatus """ _attribute_map = { @@ -3951,7 +4718,14 @@ class Chaos(Model): 'schedule_status': {'key': 'ScheduleStatus', 'type': 'str'}, } - def __init__(self, *, chaos_parameters=None, status=None, schedule_status=None, **kwargs) -> None: + def __init__( + self, + *, + chaos_parameters: Optional["ChaosParameters"] = None, + status: Optional[Union[str, "ChaosStatus"]] = None, + schedule_status: Optional[Union[str, "ChaosScheduleStatus"]] = None, + **kwargs + ): super(Chaos, self).__init__(**kwargs) self.chaos_parameters = chaos_parameters self.status = status @@ -3963,25 +4737,44 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -3993,15 +4786,14 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): :type service_manifest_name: str :param code_package_name: Required. Code package name. :type code_package_name: str - :param service_package_activation_id: Required. Id of Service package - activation. + :param service_package_activation_id: Required. Id of Service package activation. 
:type service_package_activation_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4012,11 +4804,11 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4026,27 +4818,38 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, fault_group_id: str, fault_id: str, node_name: str, service_manifest_name: str, code_package_name: str, service_package_activation_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + fault_group_id: str, + fault_id: str, + node_name: str, + service_manifest_name: str, + code_package_name: str, + service_package_activation_id: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosCodePackageRestartScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 
'ChaosCodePackageRestartScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.node_name = node_name self.service_manifest_name = service_manifest_name self.code_package_name = code_package_name self.service_package_activation_id = service_package_activation_id - self.kind = 'ChaosCodePackageRestartScheduled' -class ChaosContext(Model): - """Describes a map, which is a collection of (string, string) type key-value - pairs. The map can be used to record information about - the Chaos run. There cannot be more than 100 such pairs and each string - (key or value) can be at most 4095 characters long. - This map is set by the starter of the Chaos run to optionally store the - context about the specific run. +class ChaosContext(msrest.serialization.Model): + """Describes a map, which is a collection of (string, string) type key-value pairs. The map can be used to record information about +the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be at most 4095 characters long. +This map is set by the starter of the Chaos run to optionally store the context about the specific run. - :param map: Describes a map that contains a collection of - ChaosContextMapItem's. + :param map: Describes a map that contains a collection of ChaosContextMapItem's. :type map: dict[str, str] """ @@ -4054,58 +4857,65 @@ class ChaosContext(Model): 'map': {'key': 'Map', 'type': '{str}'}, } - def __init__(self, *, map=None, **kwargs) -> None: + def __init__( + self, + *, + map: Optional[Dict[str, str]] = None, + **kwargs + ): super(ChaosContext, self).__init__(**kwargs) self.map = map -class ChaosEvent(Model): +class ChaosEvent(msrest.serialization.Model): """Represents an event generated during a Chaos run. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, - StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, - WaitingChaosEvent + sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, WaitingChaosEvent. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, } _subtype_map = { 'kind': {'ExecutingFaults': 'ExecutingFaultsChaosEvent', 'Started': 'StartedChaosEvent', 'Stopped': 'StoppedChaosEvent', 'TestError': 'TestErrorChaosEvent', 'ValidationFailed': 'ValidationFailedChaosEvent', 'Waiting': 'WaitingChaosEvent'} } - def __init__(self, *, time_stamp_utc, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + **kwargs + ): super(ChaosEvent, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.time_stamp_utc = time_stamp_utc - self.kind = None -class ChaosEventsSegment(Model): - """Contains the list of Chaos events and the continuation token to get the - next segment. 
+class ChaosEventsSegment(msrest.serialization.Model): + """Contains the list of Chaos events and the continuation token to get the next segment. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param history: List of Chaos events that meet the user-supplied criteria. :type history: list[~azure.servicefabric.models.ChaosEventWrapper] @@ -4116,13 +4926,19 @@ class ChaosEventsSegment(Model): 'history': {'key': 'History', 'type': '[ChaosEventWrapper]'}, } - def __init__(self, *, continuation_token: str=None, history=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + history: Optional[List["ChaosEventWrapper"]] = None, + **kwargs + ): super(ChaosEventsSegment, self).__init__(**kwargs) self.continuation_token = continuation_token self.history = history -class ChaosEventWrapper(Model): +class ChaosEventWrapper(msrest.serialization.Model): """Wrapper object for Chaos event. :param chaos_event: Represents an event generated during a Chaos run. 
@@ -4133,7 +4949,12 @@ class ChaosEventWrapper(Model): 'chaos_event': {'key': 'ChaosEvent', 'type': 'ChaosEvent'}, } - def __init__(self, *, chaos_event=None, **kwargs) -> None: + def __init__( + self, + *, + chaos_event: Optional["ChaosEvent"] = None, + **kwargs + ): super(ChaosEventWrapper, self).__init__(**kwargs) self.chaos_event = chaos_event @@ -4142,54 +4963,79 @@ class NodeEvent(FabricEvent): """Represents the base for all Node Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeAbortedEvent, NodeAddedToClusterEvent, - NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, - NodeDownEvent, NodeNewHealthReportEvent, NodeHealthReportExpiredEvent, - NodeOpenSucceededEvent, NodeOpenFailedEvent, NodeRemovedFromClusterEvent, - NodeUpEvent, ChaosNodeRestartScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosNodeRestartScheduledEvent, NodeAbortedEvent, NodeAddedToClusterEvent, NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, NodeDownEvent, NodeHealthReportExpiredEvent, NodeNewHealthReportEvent, NodeOpenFailedEvent, NodeOpenSucceededEvent, NodeRemovedFromClusterEvent, NodeUpEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, } _subtype_map = { - 'kind': {'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent', 'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent'} - } - - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + 'kind': {'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent', 'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 
'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent'} + } + + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'NodeEvent' # type: str self.node_name = node_name - self.kind = 'NodeEvent' class ChaosNodeRestartScheduledEvent(NodeEvent): @@ -4197,18 +5043,38 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -4220,9 +5086,9 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4230,85 +5096,88 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, fault_group_id: str, fault_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance_id: int, + fault_group_id: str, + fault_id: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + 
**kwargs + ): super(ChaosNodeRestartScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'ChaosNodeRestartScheduled' # type: str self.node_instance_id = node_instance_id self.fault_group_id = fault_group_id self.fault_id = fault_id - self.kind = 'ChaosNodeRestartScheduled' -class ChaosParameters(Model): +class ChaosParameters(msrest.serialization.Model): """Defines all the parameters to configure a Chaos run. - :param time_to_run_in_seconds: Total time (in seconds) for which Chaos - will run before automatically stopping. The maximum allowed value is - 4,294,967,295 (System.UInt32.MaxValue). Default value: "4294967295" . + :param time_to_run_in_seconds: Total time (in seconds) for which Chaos will run before + automatically stopping. The maximum allowed value is 4,294,967,295 (System.UInt32.MaxValue). :type time_to_run_in_seconds: str - :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of - time to wait for all cluster entities to become stable and healthy. Chaos - executes in iterations and at the start of each iteration it validates the - health of cluster entities. + :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of time to wait for all + cluster entities to become stable and healthy. Chaos executes in iterations and at the start of + each iteration it validates the health of cluster entities. During validation if a cluster entity is not stable and healthy within - MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation - failed event. Default value: 60 . + MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation failed event. :type max_cluster_stabilization_timeout_in_seconds: long - :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of - concurrent faults induced per iteration. 
- Chaos executes in iterations and two consecutive iterations are separated - by a validation phase. - The higher the concurrency, the more aggressive the injection of faults, - leading to inducing more complex series of states to uncover bugs. - The recommendation is to start with a value of 2 or 3 and to exercise - caution while moving up. Default value: 1 . + :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of concurrent faults + induced per iteration. + Chaos executes in iterations and two consecutive iterations are separated by a validation + phase. + The higher the concurrency, the more aggressive the injection of faults, leading to inducing + more complex series of states to uncover bugs. + The recommendation is to start with a value of 2 or 3 and to exercise caution while moving up. :type max_concurrent_faults: long - :param enable_move_replica_faults: Enables or disables the move primary - and move secondary faults. Default value: True . + :param enable_move_replica_faults: Enables or disables the move primary and move secondary + faults. :type enable_move_replica_faults: bool - :param wait_time_between_faults_in_seconds: Wait time (in seconds) between - consecutive faults within a single iteration. - The larger the value, the lower the overlapping between faults and the - simpler the sequence of state transitions that the cluster goes through. - The recommendation is to start with a value between 1 and 5 and exercise - caution while moving up. Default value: 20 . + :param wait_time_between_faults_in_seconds: Wait time (in seconds) between consecutive faults + within a single iteration. + The larger the value, the lower the overlapping between faults and the simpler the sequence of + state transitions that the cluster goes through. + The recommendation is to start with a value between 1 and 5 and exercise caution while moving + up. 
:type wait_time_between_faults_in_seconds: long - :param wait_time_between_iterations_in_seconds: Time-separation (in - seconds) between two consecutive iterations of Chaos. - The larger the value, the lower the fault injection rate. Default value: - 30 . + :param wait_time_between_iterations_in_seconds: Time-separation (in seconds) between two + consecutive iterations of Chaos. + The larger the value, the lower the fault injection rate. :type wait_time_between_iterations_in_seconds: long - :param cluster_health_policy: Passed-in cluster health policy is used to - validate health of the cluster in between Chaos iterations. If the cluster - health is in error or if an unexpected exception happens during fault - execution--to provide the cluster with some time to recuperate--Chaos will - wait for 30 minutes before the next health-check. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param context: Describes a map, which is a collection of (string, string) - type key-value pairs. The map can be used to record information about - the Chaos run. There cannot be more than 100 such pairs and each string - (key or value) can be at most 4095 characters long. - This map is set by the starter of the Chaos run to optionally store the - context about the specific run. + :param cluster_health_policy: Passed-in cluster health policy is used to validate health of the + cluster in between Chaos iterations. If the cluster health is in error or if an unexpected + exception happens during fault execution--to provide the cluster with some time to + recuperate--Chaos will wait for 30 minutes before the next health-check. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param context: Describes a map, which is a collection of (string, string) type key-value + pairs. The map can be used to record information about + the Chaos run. 
There cannot be more than 100 such pairs and each string (key or value) can be + at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the context about the + specific run. :type context: ~azure.servicefabric.models.ChaosContext - :param chaos_target_filter: List of cluster entities to target for Chaos - faults. - This filter can be used to target Chaos faults only to certain node types - or only to certain application instances. If ChaosTargetFilter is not - used, Chaos faults all cluster entities. - If ChaosTargetFilter is used, Chaos faults only the entities that meet the - ChaosTargetFilter specification. + :param chaos_target_filter: List of cluster entities to target for Chaos faults. + This filter can be used to target Chaos faults only to certain node types or only to certain + application instances. If ChaosTargetFilter is not used, Chaos faults all cluster entities. + If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter + specification. 
:type chaos_target_filter: ~azure.servicefabric.models.ChaosTargetFilter """ @@ -4331,7 +5200,20 @@ class ChaosParameters(Model): 'chaos_target_filter': {'key': 'ChaosTargetFilter', 'type': 'ChaosTargetFilter'}, } - def __init__(self, *, time_to_run_in_seconds: str="4294967295", max_cluster_stabilization_timeout_in_seconds: int=60, max_concurrent_faults: int=1, enable_move_replica_faults: bool=True, wait_time_between_faults_in_seconds: int=20, wait_time_between_iterations_in_seconds: int=30, cluster_health_policy=None, context=None, chaos_target_filter=None, **kwargs) -> None: + def __init__( + self, + *, + time_to_run_in_seconds: Optional[str] = "4294967295", + max_cluster_stabilization_timeout_in_seconds: Optional[int] = 60, + max_concurrent_faults: Optional[int] = 1, + enable_move_replica_faults: Optional[bool] = True, + wait_time_between_faults_in_seconds: Optional[int] = 20, + wait_time_between_iterations_in_seconds: Optional[int] = 30, + cluster_health_policy: Optional["ClusterHealthPolicy"] = None, + context: Optional["ChaosContext"] = None, + chaos_target_filter: Optional["ChaosTargetFilter"] = None, + **kwargs + ): super(ChaosParameters, self).__init__(**kwargs) self.time_to_run_in_seconds = time_to_run_in_seconds self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds @@ -4344,16 +5226,15 @@ def __init__(self, *, time_to_run_in_seconds: str="4294967295", max_cluster_stab self.chaos_target_filter = chaos_target_filter -class ChaosParametersDictionaryItem(Model): +class ChaosParametersDictionaryItem(msrest.serialization.Model): """Defines an item in ChaosParametersDictionary of the Chaos Schedule. All required parameters must be populated in order to send to Azure. - :param key: Required. The key identifying the Chaos Parameter in the - dictionary. This key is referenced by Chaos Schedule Jobs. + :param key: Required. The key identifying the Chaos Parameter in the dictionary. 
This key is + referenced by Chaos Schedule Jobs. :type key: str - :param value: Required. Defines all the parameters to configure a Chaos - run. + :param value: Required. Defines all the parameters to configure a Chaos run. :type value: ~azure.servicefabric.models.ChaosParameters """ @@ -4367,7 +5248,13 @@ class ChaosParametersDictionaryItem(Model): 'value': {'key': 'Value', 'type': 'ChaosParameters'}, } - def __init__(self, *, key: str, value, **kwargs) -> None: + def __init__( + self, + *, + key: str, + value: "ChaosParameters", + **kwargs + ): super(ChaosParametersDictionaryItem, self).__init__(**kwargs) self.key = key self.value = value @@ -4377,57 +5264,82 @@ class PartitionEvent(FabricEvent): """Represents the base for all Partition Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionAnalysisEvent, PartitionNewHealthReportEvent, - PartitionHealthReportExpiredEvent, PartitionReconfiguredEvent, - ChaosPartitionSecondaryMoveScheduledEvent, - ChaosPartitionPrimaryMoveScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosPartitionPrimaryMoveScheduledEvent, ChaosPartitionSecondaryMoveScheduledEvent, PartitionAnalysisEvent, PartitionHealthReportExpiredEvent, PartitionNewHealthReportEvent, PartitionReconfiguredEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. 
:type partition_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { - 'kind': {'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent'} - } - - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + 'kind': {'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent'} + } + + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(PartitionEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, 
**kwargs) + self.kind = 'PartitionEvent' # type: str self.partition_id = partition_id - self.kind = 'PartitionEvent' class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): @@ -4435,23 +5347,42 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + 
"ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -4466,9 +5397,9 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4478,11 +5409,11 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4491,14 +5422,28 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, fault_group_id: str, fault_id: str, service_name: str, node_to: str, forced_move: bool, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + fault_group_id: str, + fault_id: str, + service_name: str, + node_to: str, + forced_move: bool, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosPartitionPrimaryMoveScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.kind = 'ChaosPartitionPrimaryMoveScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = 
fault_id self.service_name = service_name self.node_to = node_to self.forced_move = forced_move - self.kind = 'ChaosPartitionPrimaryMoveScheduled' class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): @@ -4506,23 +5451,42 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -4539,9 +5503,9 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4552,11 +5516,11 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4566,84 +5530,122 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, fault_group_id: str, fault_id: str, service_name: str, source_node: str, destination_node: str, forced_move: bool, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + fault_group_id: str, + fault_id: str, + service_name: str, + source_node: str, + destination_node: str, + forced_move: bool, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosPartitionSecondaryMoveScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.kind = 'ChaosPartitionSecondaryMoveScheduled' # 
type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.service_name = service_name self.source_node = source_node self.destination_node = destination_node self.forced_move = forced_move - self.kind = 'ChaosPartitionSecondaryMoveScheduled' class ReplicaEvent(FabricEvent): """Represents the base for all Replica Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulReplicaNewHealthReportEvent, - StatefulReplicaHealthReportExpiredEvent, - StatelessReplicaNewHealthReportEvent, - StatelessReplicaHealthReportExpiredEvent, - ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent, StatefulReplicaHealthReportExpiredEvent, StatefulReplicaNewHealthReportEvent, StatelessReplicaHealthReportExpiredEvent, StatelessReplicaNewHealthReportEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, } _subtype_map = { - 'kind': {'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent'} - } - - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + 'kind': {'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent'} + } + + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + 
partition_id: str, + replica_id: int, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ReplicaEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ReplicaEvent' # type: str self.partition_id = partition_id self.replica_id = replica_id - self.kind = 'ReplicaEvent' class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): @@ -4651,31 +5653,48 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + 
"StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. 
Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -4686,9 +5705,9 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4697,11 +5716,11 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -4709,12 +5728,25 @@ class 
ChaosReplicaRemovalScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + replica_id: int, + fault_group_id: str, + fault_id: str, + service_uri: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosReplicaRemovalScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.kind = 'ChaosReplicaRemovalScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.service_uri = service_uri - self.kind = 'ChaosReplicaRemovalScheduled' class ChaosReplicaRestartScheduledEvent(ReplicaEvent): @@ -4722,31 +5754,48 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -4757,9 +5806,9 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4768,11 +5817,11 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -4780,29 +5829,39 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + replica_id: int, + fault_group_id: str, + fault_id: str, + service_uri: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosReplicaRestartScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) 
+ self.kind = 'ChaosReplicaRestartScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.service_uri = service_uri - self.kind = 'ChaosReplicaRestartScheduled' -class ChaosSchedule(Model): +class ChaosSchedule(msrest.serialization.Model): """Defines the schedule used by Chaos. :param start_date: The date and time Chaos will start using this schedule. - Default value: "1601-01-01T00:00:00Z" . - :type start_date: datetime - :param expiry_date: The date and time Chaos will continue to use this - schedule until. Default value: "9999-12-31T23:59:59.999Z" . - :type expiry_date: datetime - :param chaos_parameters_dictionary: A mapping of string names to Chaos - Parameters to be referenced by Chaos Schedule Jobs. + :type start_date: ~datetime.datetime + :param expiry_date: The date and time Chaos will continue to use this schedule until. + :type expiry_date: ~datetime.datetime + :param chaos_parameters_dictionary: A mapping of string names to Chaos Parameters to be + referenced by Chaos Schedule Jobs. :type chaos_parameters_dictionary: list[~azure.servicefabric.models.ChaosParametersDictionaryItem] - :param jobs: A list of all Chaos Schedule Jobs that will be automated by - the schedule. + :param jobs: A list of all Chaos Schedule Jobs that will be automated by the schedule. 
:type jobs: list[~azure.servicefabric.models.ChaosScheduleJob] """ @@ -4813,7 +5872,15 @@ class ChaosSchedule(Model): 'jobs': {'key': 'Jobs', 'type': '[ChaosScheduleJob]'}, } - def __init__(self, *, start_date="1601-01-01T00:00:00Z", expiry_date="9999-12-31T23:59:59.999Z", chaos_parameters_dictionary=None, jobs=None, **kwargs) -> None: + def __init__( + self, + *, + start_date: Optional[datetime.datetime] = "1601-01-01T00:00:00Z", + expiry_date: Optional[datetime.datetime] = "9999-12-31T23:59:59.999Z", + chaos_parameters_dictionary: Optional[List["ChaosParametersDictionaryItem"]] = None, + jobs: Optional[List["ChaosScheduleJob"]] = None, + **kwargs + ): super(ChaosSchedule, self).__init__(**kwargs) self.start_date = start_date self.expiry_date = expiry_date @@ -4821,9 +5888,8 @@ def __init__(self, *, start_date="1601-01-01T00:00:00Z", expiry_date="9999-12-31 self.jobs = jobs -class ChaosScheduleDescription(Model): - """Defines the Chaos Schedule used by Chaos and the version of the Chaos - Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. +class ChaosScheduleDescription(msrest.serialization.Model): + """Defines the Chaos Schedule used by Chaos and the version of the Chaos Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. :param version: The version number of the Schedule. :type version: int @@ -4840,24 +5906,27 @@ class ChaosScheduleDescription(Model): 'schedule': {'key': 'Schedule', 'type': 'ChaosSchedule'}, } - def __init__(self, *, version: int=None, schedule=None, **kwargs) -> None: + def __init__( + self, + *, + version: Optional[int] = None, + schedule: Optional["ChaosSchedule"] = None, + **kwargs + ): super(ChaosScheduleDescription, self).__init__(**kwargs) self.version = version self.schedule = schedule -class ChaosScheduleJob(Model): - """Defines a repetition rule and parameters of Chaos to be used with the Chaos - Schedule. 
+class ChaosScheduleJob(msrest.serialization.Model): + """Defines a repetition rule and parameters of Chaos to be used with the Chaos Schedule. - :param chaos_parameters: A reference to which Chaos Parameters of the - Chaos Schedule to use. + :param chaos_parameters: A reference to which Chaos Parameters of the Chaos Schedule to use. :type chaos_parameters: str - :param days: Defines the days of the week that a Chaos Schedule Job will - run for. + :param days: Defines the days of the week that a Chaos Schedule Job will run for. :type days: ~azure.servicefabric.models.ChaosScheduleJobActiveDaysOfWeek - :param times: A list of Time Ranges that specify when during active days - that this job will run. The times are interpreted as UTC. + :param times: A list of Time Ranges that specify when during active days that this job will + run. The times are interpreted as UTC. :type times: list[~azure.servicefabric.models.TimeRange] """ @@ -4867,36 +5936,36 @@ class ChaosScheduleJob(Model): 'times': {'key': 'Times', 'type': '[TimeRange]'}, } - def __init__(self, *, chaos_parameters: str=None, days=None, times=None, **kwargs) -> None: + def __init__( + self, + *, + chaos_parameters: Optional[str] = None, + days: Optional["ChaosScheduleJobActiveDaysOfWeek"] = None, + times: Optional[List["TimeRange"]] = None, + **kwargs + ): super(ChaosScheduleJob, self).__init__(**kwargs) self.chaos_parameters = chaos_parameters self.days = days self.times = times -class ChaosScheduleJobActiveDaysOfWeek(Model): +class ChaosScheduleJobActiveDaysOfWeek(msrest.serialization.Model): """Defines the days of the week that a Chaos Schedule Job will run for. :param sunday: Indicates if the Chaos Schedule Job will run on Sunday. - Default value: False . :type sunday: bool :param monday: Indicates if the Chaos Schedule Job will run on Monday. - Default value: False . :type monday: bool :param tuesday: Indicates if the Chaos Schedule Job will run on Tuesday. - Default value: False . 
:type tuesday: bool - :param wednesday: Indicates if the Chaos Schedule Job will run on - Wednesday. Default value: False . + :param wednesday: Indicates if the Chaos Schedule Job will run on Wednesday. :type wednesday: bool :param thursday: Indicates if the Chaos Schedule Job will run on Thursday. - Default value: False . :type thursday: bool :param friday: Indicates if the Chaos Schedule Job will run on Friday. - Default value: False . :type friday: bool :param saturday: Indicates if the Chaos Schedule Job will run on Saturday. - Default value: False . :type saturday: bool """ @@ -4910,7 +5979,18 @@ class ChaosScheduleJobActiveDaysOfWeek(Model): 'saturday': {'key': 'Saturday', 'type': 'bool'}, } - def __init__(self, *, sunday: bool=False, monday: bool=False, tuesday: bool=False, wednesday: bool=False, thursday: bool=False, friday: bool=False, saturday: bool=False, **kwargs) -> None: + def __init__( + self, + *, + sunday: Optional[bool] = False, + monday: Optional[bool] = False, + tuesday: Optional[bool] = False, + wednesday: Optional[bool] = False, + thursday: Optional[bool] = False, + friday: Optional[bool] = False, + saturday: Optional[bool] = False, + **kwargs + ): super(ChaosScheduleJobActiveDaysOfWeek, self).__init__(**kwargs) self.sunday = sunday self.monday = monday @@ -4925,49 +6005,73 @@ class ClusterEvent(FabricEvent): """Represents the base for all Cluster Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ClusterNewHealthReportEvent, - ClusterHealthReportExpiredEvent, ClusterUpgradeCompletedEvent, - ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, - ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent, - ChaosStoppedEvent, ChaosStartedEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. 
+ sub-classes are: ChaosStartedEvent, ChaosStoppedEvent, ClusterHealthReportExpiredEvent, ClusterNewHealthReportEvent, ClusterUpgradeCompletedEvent, ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", 
"ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ChaosStarted': 'ChaosStartedEvent'} - } - - def __init__(self, *, event_instance_id: str, time_stamp, category: 
str=None, has_correlated_events: bool=None, **kwargs) -> None: + 'kind': {'ChaosStarted': 'ChaosStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent'} + } + + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterEvent' + self.kind = 'ClusterEvent' # type: str class ChaosStartedEvent(ClusterEvent): @@ -4975,34 +6079,51 @@ class ChaosStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param max_concurrent_faults: Required. Maximum number of concurrent - faults. + :param max_concurrent_faults: Required. Maximum number of concurrent faults. :type max_concurrent_faults: long :param time_to_run_in_seconds: Required. Time to run in seconds. :type time_to_run_in_seconds: float - :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum - timeout for cluster stabilization in seconds. + :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum timeout for cluster + stabilization in seconds. :type max_cluster_stabilization_timeout_in_seconds: float - :param wait_time_between_iterations_in_seconds: Required. Wait time - between iterations in seconds. + :param wait_time_between_iterations_in_seconds: Required. Wait time between iterations in + seconds. :type wait_time_between_iterations_in_seconds: float - :param wait_time_between_faults_in_seconds: Required. Wait time between - faults in seconds. + :param wait_time_between_faults_in_seconds: Required. Wait time between faults in seconds. :type wait_time_between_faults_in_seconds: float - :param move_replica_fault_enabled: Required. Indicates MoveReplica fault - is enabled. + :param move_replica_fault_enabled: Required. Indicates MoveReplica fault is enabled. :type move_replica_fault_enabled: bool :param included_node_type_list: Required. List of included Node types. 
:type included_node_type_list: str @@ -5015,9 +6136,9 @@ class ChaosStartedEvent(ClusterEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'max_concurrent_faults': {'required': True}, 'time_to_run_in_seconds': {'required': True}, 'max_cluster_stabilization_timeout_in_seconds': {'required': True}, @@ -5031,11 +6152,11 @@ class ChaosStartedEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'float'}, 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'float'}, @@ -5048,8 +6169,27 @@ class ChaosStartedEvent(ClusterEvent): 'chaos_context': {'key': 'ChaosContext', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, max_concurrent_faults: int, time_to_run_in_seconds: float, max_cluster_stabilization_timeout_in_seconds: float, wait_time_between_iterations_in_seconds: float, wait_time_between_faults_in_seconds: float, move_replica_fault_enabled: bool, included_node_type_list: str, included_application_list: str, cluster_health_policy: str, chaos_context: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + max_concurrent_faults: int, + time_to_run_in_seconds: float, + max_cluster_stabilization_timeout_in_seconds: float, + wait_time_between_iterations_in_seconds: float, + wait_time_between_faults_in_seconds: float, + 
move_replica_fault_enabled: bool, + included_node_type_list: str, + included_application_list: str, + cluster_health_policy: str, + chaos_context: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ChaosStarted' # type: str self.max_concurrent_faults = max_concurrent_faults self.time_to_run_in_seconds = time_to_run_in_seconds self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds @@ -5060,7 +6200,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, max_concurrent_faults: self.included_application_list = included_application_list self.cluster_health_policy = cluster_health_policy self.chaos_context = chaos_context - self.kind = 'ChaosStarted' class ChaosStoppedEvent(ClusterEvent): @@ -5068,96 +6207,106 @@ class ChaosStoppedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param reason: Required. Describes reason. :type reason: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'reason': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, reason: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + reason: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ChaosStoppedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ChaosStopped' # type: str self.reason = reason - self.kind = 'ChaosStopped' - - -class ChaosTargetFilter(Model): - """Defines all filters for targeted Chaos faults, for example, faulting only - certain node types or faulting only certain applications. - If ChaosTargetFilter is not used, Chaos faults all cluster entities. 
If - ChaosTargetFilter is used, Chaos faults only the entities that meet the - ChaosTargetFilter - specification. NodeTypeInclusionList and ApplicationInclusionList allow a - union semantics only. It is not possible to specify an intersection - of NodeTypeInclusionList and ApplicationInclusionList. For example, it is - not possible to specify "fault this application only when it is on that - node type." - Once an entity is included in either NodeTypeInclusionList or - ApplicationInclusionList, that entity cannot be excluded using - ChaosTargetFilter. Even if - applicationX does not appear in ApplicationInclusionList, in some Chaos - iteration applicationX can be faulted because it happens to be on a node of - nodeTypeY that is included - in NodeTypeInclusionList. If both NodeTypeInclusionList and - ApplicationInclusionList are null or empty, an ArgumentException is thrown. - - :param node_type_inclusion_list: A list of node types to include in Chaos - faults. - All types of faults (restart node, restart code package, remove replica, - restart replica, move primary, and move secondary) are enabled for the - nodes of these node types. - If a node type (say NodeTypeX) does not appear in the - NodeTypeInclusionList, then node level faults (like NodeRestart) will - never be enabled for the nodes of - NodeTypeX, but code package and replica faults can still be enabled for - NodeTypeX if an application in the ApplicationInclusionList. + + +class ChaosTargetFilter(msrest.serialization.Model): + """Defines all filters for targeted Chaos faults, for example, faulting only certain node types or faulting only certain applications. +If ChaosTargetFilter is not used, Chaos faults all cluster entities. If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter +specification. NodeTypeInclusionList and ApplicationInclusionList allow a union semantics only. 
It is not possible to specify an intersection +of NodeTypeInclusionList and ApplicationInclusionList. For example, it is not possible to specify "fault this application only when it is on that node type." +Once an entity is included in either NodeTypeInclusionList or ApplicationInclusionList, that entity cannot be excluded using ChaosTargetFilter. Even if +applicationX does not appear in ApplicationInclusionList, in some Chaos iteration applicationX can be faulted because it happens to be on a node of nodeTypeY that is included +in NodeTypeInclusionList. If both NodeTypeInclusionList and ApplicationInclusionList are null or empty, an ArgumentException is thrown. + + :param node_type_inclusion_list: A list of node types to include in Chaos faults. + All types of faults (restart node, restart code package, remove replica, restart replica, move + primary, and move secondary) are enabled for the nodes of these node types. + If a node type (say NodeTypeX) does not appear in the NodeTypeInclusionList, then node level + faults (like NodeRestart) will never be enabled for the nodes of + NodeTypeX, but code package and replica faults can still be enabled for NodeTypeX if an + application in the ApplicationInclusionList. happens to reside on a node of NodeTypeX. - At most 100 node type names can be included in this list, to increase this - number, a config upgrade is required for - MaxNumberOfNodeTypesInChaosEntityFilter configuration. + At most 100 node type names can be included in this list, to increase this number, a config + upgrade is required for MaxNumberOfNodeTypesInChaosEntityFilter configuration. :type node_type_inclusion_list: list[str] - :param application_inclusion_list: A list of application URIs to include - in Chaos faults. - All replicas belonging to services of these applications are amenable to - replica faults (restart replica, remove replica, move primary, and move - secondary) by Chaos. 
- Chaos may restart a code package only if the code package hosts replicas - of these applications only. - If an application does not appear in this list, it can still be faulted in - some Chaos iteration if the application ends up on a node of a node type - that is included in NodeTypeInclusionList. - However, if applicationX is tied to nodeTypeY through placement - constraints and applicationX is absent from ApplicationInclusionList and - nodeTypeY is absent from NodeTypeInclusionList, then applicationX will - never be faulted. - At most 1000 application names can be included in this list, to increase - this number, a config upgrade is required for - MaxNumberOfApplicationsInChaosEntityFilter configuration. + :param application_inclusion_list: A list of application URIs to include in Chaos faults. + All replicas belonging to services of these applications are amenable to replica faults + (restart replica, remove replica, move primary, and move secondary) by Chaos. + Chaos may restart a code package only if the code package hosts replicas of these applications + only. + If an application does not appear in this list, it can still be faulted in some Chaos + iteration if the application ends up on a node of a node type that is included in + NodeTypeInclusionList. + However, if applicationX is tied to nodeTypeY through placement constraints and applicationX + is absent from ApplicationInclusionList and nodeTypeY is absent from NodeTypeInclusionList, + then applicationX will never be faulted. + At most 1000 application names can be included in this list, to increase this number, a config + upgrade is required for MaxNumberOfApplicationsInChaosEntityFilter configuration. 
:type application_inclusion_list: list[str] """ @@ -5166,170 +6315,194 @@ class ChaosTargetFilter(Model): 'application_inclusion_list': {'key': 'ApplicationInclusionList', 'type': '[str]'}, } - def __init__(self, *, node_type_inclusion_list=None, application_inclusion_list=None, **kwargs) -> None: + def __init__( + self, + *, + node_type_inclusion_list: Optional[List[str]] = None, + application_inclusion_list: Optional[List[str]] = None, + **kwargs + ): super(ChaosTargetFilter, self).__init__(**kwargs) self.node_type_inclusion_list = node_type_inclusion_list self.application_inclusion_list = application_inclusion_list -class PropertyBatchOperation(Model): - """Represents the base type for property operations that can be put into a - batch and submitted. +class PropertyBatchOperation(msrest.serialization.Model): + """Represents the base type for property operations that can be put into a batch and submitted. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CheckExistsPropertyBatchOperation, - CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, - DeletePropertyBatchOperation, GetPropertyBatchOperation, - PutPropertyBatchOperation + sub-classes are: CheckExistsPropertyBatchOperation, CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, DeletePropertyBatchOperation, GetPropertyBatchOperation, PutPropertyBatchOperation. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, } _subtype_map = { 'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'} } - def __init__(self, *, property_name: str, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + **kwargs + ): super(PropertyBatchOperation, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.property_name = property_name - self.kind = None class CheckExistsPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the Boolean existence of - a property with the Exists argument. - The PropertyBatchOperation operation fails if the property's existence is - not equal to the Exists argument. - The CheckExistsPropertyBatchOperation is generally used as a precondition - for the write operations in the batch. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the Boolean existence of a property with the Exists argument. +The PropertyBatchOperation operation fails if the property's existence is not equal to the Exists argument. +The CheckExistsPropertyBatchOperation is generally used as a precondition for the write operations in the batch. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. 
All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str - :param exists: Required. Whether or not the property should exist for the - operation to pass. + :param exists: Required. Whether or not the property should exist for the operation to pass. :type exists: bool """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'exists': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'exists': {'key': 'Exists', 'type': 'bool'}, } - def __init__(self, *, property_name: str, exists: bool, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + exists: bool, + **kwargs + ): super(CheckExistsPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.kind = 'CheckExists' # type: str self.exists = exists - self.kind = 'CheckExists' class CheckSequencePropertyBatchOperation(PropertyBatchOperation): - """Compares the Sequence Number of a property with the SequenceNumber - argument. - A property's sequence number can be thought of as that property's version. - Every time the property is modified, its sequence number is increased. - The sequence number can be found in a property's metadata. - The comparison fails if the sequence numbers are not equal. 
- CheckSequencePropertyBatchOperation is generally used as a precondition for - the write operations in the batch. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Compares the Sequence Number of a property with the SequenceNumber argument. +A property's sequence number can be thought of as that property's version. +Every time the property is modified, its sequence number is increased. +The sequence number can be found in a property's metadata. +The comparison fails if the sequence numbers are not equal. +CheckSequencePropertyBatchOperation is generally used as a precondition for the write operations in the batch. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str :param sequence_number: Required. The expected sequence number. 
:type sequence_number: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'sequence_number': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__(self, *, property_name: str, sequence_number: str, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + sequence_number: str, + **kwargs + ): super(CheckSequencePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.kind = 'CheckSequence' # type: str self.sequence_number = sequence_number - self.kind = 'CheckSequence' class CheckValuePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the value of the property - with the expected value. - The CheckValuePropertyBatchOperation is generally used as a precondition - for the write operations in the batch. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the value of the property with the expected value. +The CheckValuePropertyBatchOperation is generally used as a precondition for the write operations in the batch. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". 
+ :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str :param value: Required. The expected property value. :type value: ~azure.servicefabric.models.PropertyValue """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__(self, *, property_name: str, value, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + value: "PropertyValue", + **kwargs + ): super(CheckValuePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.kind = 'CheckValue' # type: str self.value = value - self.kind = 'CheckValue' -class ClusterConfiguration(Model): +class ClusterConfiguration(msrest.serialization.Model): """Information about the standalone cluster configuration. - :param cluster_configuration: The contents of the cluster configuration - file. + :param cluster_configuration: The contents of the cluster configuration file. 
:type cluster_configuration: str """ @@ -5337,62 +6510,56 @@ class ClusterConfiguration(Model): 'cluster_configuration': {'key': 'ClusterConfiguration', 'type': 'str'}, } - def __init__(self, *, cluster_configuration: str=None, **kwargs) -> None: + def __init__( + self, + *, + cluster_configuration: Optional[str] = None, + **kwargs + ): super(ClusterConfiguration, self).__init__(**kwargs) self.cluster_configuration = cluster_configuration -class ClusterConfigurationUpgradeDescription(Model): +class ClusterConfigurationUpgradeDescription(msrest.serialization.Model): """Describes the parameters for a standalone cluster configuration upgrade. All required parameters must be populated in order to send to Azure. - :param cluster_config: Required. The cluster configuration as a JSON - string. For example, [this - file](https://github.com/Azure-Samples/service-fabric-dotnet-standalone-cluster-configuration/blob/master/Samples/ClusterConfig.Unsecure.DevCluster.json) - contains JSON describing the [nodes and other properties of the - cluster](https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-manifest). + :param cluster_config: Required. The cluster configuration as a JSON string. For example, `this + file + `_ + contains JSON describing the `nodes and other properties of the cluster + `_. :type cluster_config: str - :param health_check_retry_timeout: The length of time between attempts to - perform health checks if the application or cluster is not healthy. - Default value: "PT0H0M0S" . - :type health_check_retry_timeout: timedelta - :param health_check_wait_duration_in_seconds: The length of time to wait - after completing an upgrade domain before starting the health checks - process. Default value: "PT0H0M0S" . - :type health_check_wait_duration_in_seconds: timedelta - :param health_check_stable_duration_in_seconds: The length of time that - the application or cluster must remain healthy before the upgrade proceeds - to the next upgrade domain. 
Default value: "PT0H0M0S" . - :type health_check_stable_duration_in_seconds: timedelta - :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade - domain. Default value: "PT0H0M0S" . - :type upgrade_domain_timeout_in_seconds: timedelta - :param upgrade_timeout_in_seconds: The upgrade timeout. Default value: - "PT0H0M0S" . - :type upgrade_timeout_in_seconds: timedelta - :param max_percent_unhealthy_applications: The maximum allowed percentage - of unhealthy applications during the upgrade. Allowed values are integer - values from zero to 100. Default value: 0 . + :param health_check_retry_timeout: The length of time between attempts to perform health checks + if the application or cluster is not healthy. + :type health_check_retry_timeout: ~datetime.timedelta + :param health_check_wait_duration_in_seconds: The length of time to wait after completing an + upgrade domain before starting the health checks process. + :type health_check_wait_duration_in_seconds: ~datetime.timedelta + :param health_check_stable_duration_in_seconds: The length of time that the application or + cluster must remain healthy before the upgrade proceeds to the next upgrade domain. + :type health_check_stable_duration_in_seconds: ~datetime.timedelta + :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade domain. + :type upgrade_domain_timeout_in_seconds: ~datetime.timedelta + :param upgrade_timeout_in_seconds: The upgrade timeout. + :type upgrade_timeout_in_seconds: ~datetime.timedelta + :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy + applications during the upgrade. Allowed values are integer values from zero to 100. :type max_percent_unhealthy_applications: int - :param max_percent_unhealthy_nodes: The maximum allowed percentage of - unhealthy nodes during the upgrade. Allowed values are integer values from - zero to 100. Default value: 0 . 
+ :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes during + the upgrade. Allowed values are integer values from zero to 100. :type max_percent_unhealthy_nodes: int - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage - of delta health degradation during the upgrade. Allowed values are integer - values from zero to 100. Default value: 0 . + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of delta health + degradation during the upgrade. Allowed values are integer values from zero to 100. :type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum - allowed percentage of upgrade domain delta health degradation during the - upgrade. Allowed values are integer values from zero to 100. Default - value: 0 . + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of + upgrade domain delta health degradation during the upgrade. Allowed values are integer values + from zero to 100. :type max_percent_upgrade_domain_delta_unhealthy_nodes: int - :param application_health_policies: Defines the application health policy - map used to evaluate the health of an application or one of its children - entities. - :type application_health_policies: - ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policies: Defines the application health policy map used to evaluate + the health of an application or one of its children entities. 
+ :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies """ _validation = { @@ -5413,7 +6580,22 @@ class ClusterConfigurationUpgradeDescription(Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, *, cluster_config: str, health_check_retry_timeout="PT0H0M0S", health_check_wait_duration_in_seconds="PT0H0M0S", health_check_stable_duration_in_seconds="PT0H0M0S", upgrade_domain_timeout_in_seconds="PT0H0M0S", upgrade_timeout_in_seconds="PT0H0M0S", max_percent_unhealthy_applications: int=0, max_percent_unhealthy_nodes: int=0, max_percent_delta_unhealthy_nodes: int=0, max_percent_upgrade_domain_delta_unhealthy_nodes: int=0, application_health_policies=None, **kwargs) -> None: + def __init__( + self, + *, + cluster_config: str, + health_check_retry_timeout: Optional[datetime.timedelta] = "PT0H0M0S", + health_check_wait_duration_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", + health_check_stable_duration_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", + upgrade_domain_timeout_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", + upgrade_timeout_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", + max_percent_unhealthy_applications: Optional[int] = 0, + max_percent_unhealthy_nodes: Optional[int] = 0, + max_percent_delta_unhealthy_nodes: Optional[int] = 0, + max_percent_upgrade_domain_delta_unhealthy_nodes: Optional[int] = 0, + application_health_policies: Optional["ApplicationHealthPolicies"] = None, + **kwargs + ): super(ClusterConfigurationUpgradeDescription, self).__init__(**kwargs) self.cluster_config = cluster_config self.health_check_retry_timeout = health_check_retry_timeout @@ -5428,13 +6610,12 @@ def __init__(self, *, cluster_config: str, health_check_retry_timeout="PT0H0M0S" self.application_health_policies = application_health_policies -class ClusterConfigurationUpgradeStatusInfo(Model): +class 
ClusterConfigurationUpgradeStatusInfo(msrest.serialization.Model): """Information about a standalone cluster configuration upgrade status. - :param upgrade_state: The state of the upgrade domain. Possible values - include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', - 'RollingForwardPending', 'RollingForwardInProgress', - 'RollingForwardCompleted', 'Failed' + :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", + "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", + "RollingForwardInProgress", "RollingForwardCompleted", "Failed". :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState :param progress_status: The cluster manifest version. :type progress_status: int @@ -5451,7 +6632,15 @@ class ClusterConfigurationUpgradeStatusInfo(Model): 'details': {'key': 'Details', 'type': 'str'}, } - def __init__(self, *, upgrade_state=None, progress_status: int=None, config_version: str=None, details: str=None, **kwargs) -> None: + def __init__( + self, + *, + upgrade_state: Optional[Union[str, "UpgradeState"]] = None, + progress_status: Optional[int] = None, + config_version: Optional[str] = None, + details: Optional[str] = None, + **kwargs + ): super(ClusterConfigurationUpgradeStatusInfo, self).__init__(**kwargs) self.upgrade_state = upgrade_state self.progress_status = progress_status @@ -5461,35 +6650,28 @@ def __init__(self, *, upgrade_state=None, progress_status: int=None, config_vers class ClusterHealth(EntityHealth): """Represents the health of the cluster. - Contains the cluster aggregated health state, the cluster application and - node health states as well as the health events and the unhealthy - evaluations. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). 
- The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState +Contains the cluster aggregated health state, the cluster application and node health states as well as the health events and the unhealthy evaluations. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param node_health_states: Cluster node health states as found in the - health store. - :type node_health_states: - list[~azure.servicefabric.models.NodeHealthState] - :param application_health_states: Cluster application health states as - found in the health store. 
- :type application_health_states: - list[~azure.servicefabric.models.ApplicationHealthState] + :param node_health_states: Cluster node health states as found in the health store. + :type node_health_states: list[~azure.servicefabric.models.NodeHealthState] + :param application_health_states: Cluster application health states as found in the health + store. + :type application_health_states: list[~azure.servicefabric.models.ApplicationHealthState] """ _attribute_map = { @@ -5501,33 +6683,38 @@ class ClusterHealth(EntityHealth): 'application_health_states': {'key': 'ApplicationHealthStates', 'type': '[ApplicationHealthState]'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, node_health_states=None, application_health_states=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + node_health_states: Optional[List["NodeHealthState"]] = None, + application_health_states: Optional[List["ApplicationHealthState"]] = None, + **kwargs + ): super(ClusterHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.node_health_states = node_health_states self.application_health_states = application_health_states -class ClusterHealthChunk(Model): +class ClusterHealthChunk(msrest.serialization.Model): """Represents the health chunk of the cluster. - Contains the cluster aggregated health state, and the cluster entities that - respect the input filter. - - :param health_state: The HealthState representing the aggregated health - state of the cluster computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired cluster health policy and - the application health policies. Possible values include: 'Invalid', 'Ok', - 'Warning', 'Error', 'Unknown' +Contains the cluster aggregated health state, and the cluster entities that respect the input filter. + + :param health_state: The HealthState representing the aggregated health state of the cluster + computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired cluster health policy and the application + health policies. Possible values include: "Invalid", "Ok", "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param node_health_state_chunks: The list of node health state chunks in - the cluster that respect the filters in the cluster health chunk query - description. - :type node_health_state_chunks: - ~azure.servicefabric.models.NodeHealthStateChunkList - :param application_health_state_chunks: The list of application health - state chunks in the cluster that respect the filters in the cluster health - chunk query description. + :param node_health_state_chunks: The list of node health state chunks in the cluster that + respect the filters in the cluster health chunk query description. + :type node_health_state_chunks: ~azure.servicefabric.models.NodeHealthStateChunkList + :param application_health_state_chunks: The list of application health state chunks in the + cluster that respect the filters in the cluster health chunk query description. 
:type application_health_state_chunks: ~azure.servicefabric.models.ApplicationHealthStateChunkList """ @@ -5538,49 +6725,45 @@ class ClusterHealthChunk(Model): 'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'}, } - def __init__(self, *, health_state=None, node_health_state_chunks=None, application_health_state_chunks=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + node_health_state_chunks: Optional["NodeHealthStateChunkList"] = None, + application_health_state_chunks: Optional["ApplicationHealthStateChunkList"] = None, + **kwargs + ): super(ClusterHealthChunk, self).__init__(**kwargs) self.health_state = health_state self.node_health_state_chunks = node_health_state_chunks self.application_health_state_chunks = application_health_state_chunks -class ClusterHealthChunkQueryDescription(Model): - """The cluster health chunk query description, which can specify the health - policies to evaluate cluster health and very expressive filters to select - which cluster entities to include in response. +class ClusterHealthChunkQueryDescription(msrest.serialization.Model): + """The cluster health chunk query description, which can specify the health policies to evaluate cluster health and very expressive filters to select which cluster entities to include in response. - :param node_filters: Defines a list of filters that specify which nodes to - be included in the returned cluster health chunk. - If no filters are specified, no nodes are returned. All the nodes are used - to evaluate the cluster's aggregated health state, regardless of the input - filters. + :param node_filters: Defines a list of filters that specify which nodes to be included in the + returned cluster health chunk. + If no filters are specified, no nodes are returned. All the nodes are used to evaluate the + cluster's aggregated health state, regardless of the input filters. 
The cluster health chunk query may specify multiple node filters. - For example, it can specify a filter to return all nodes with health state - Error and another filter to always include a node identified by its - NodeName. - :type node_filters: - list[~azure.servicefabric.models.NodeHealthStateFilter] - :param application_filters: Defines a list of filters that specify which - applications to be included in the returned cluster health chunk. - If no filters are specified, no applications are returned. All the - applications are used to evaluate the cluster's aggregated health state, - regardless of the input filters. + For example, it can specify a filter to return all nodes with health state Error and another + filter to always include a node identified by its NodeName. + :type node_filters: list[~azure.servicefabric.models.NodeHealthStateFilter] + :param application_filters: Defines a list of filters that specify which applications to be + included in the returned cluster health chunk. + If no filters are specified, no applications are returned. All the applications are used to + evaluate the cluster's aggregated health state, regardless of the input filters. The cluster health chunk query may specify multiple application filters. - For example, it can specify a filter to return all applications with - health state Error and another filter to always include applications of a - specified application type. - :type application_filters: - list[~azure.servicefabric.models.ApplicationHealthStateFilter] - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param application_health_policies: Defines the application health policy - map used to evaluate the health of an application or one of its children - entities. 
- :type application_health_policies: - ~azure.servicefabric.models.ApplicationHealthPolicies + For example, it can specify a filter to return all applications with health state Error and + another filter to always include applications of a specified application type. + :type application_filters: list[~azure.servicefabric.models.ApplicationHealthStateFilter] + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param application_health_policies: Defines the application health policy map used to evaluate + the health of an application or one of its children entities. + :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -5590,7 +6773,15 @@ class ClusterHealthChunkQueryDescription(Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, *, node_filters=None, application_filters=None, cluster_health_policy=None, application_health_policies=None, **kwargs) -> None: + def __init__( + self, + *, + node_filters: Optional[List["NodeHealthStateFilter"]] = None, + application_filters: Optional[List["ApplicationHealthStateFilter"]] = None, + cluster_health_policy: Optional["ClusterHealthPolicy"] = None, + application_health_policies: Optional["ApplicationHealthPolicies"] = None, + **kwargs + ): super(ClusterHealthChunkQueryDescription, self).__init__(**kwargs) self.node_filters = node_filters self.application_filters = application_filters @@ -5598,24 +6789,22 @@ def __init__(self, *, node_filters=None, application_filters=None, cluster_healt self.application_health_policies = application_health_policies -class ClusterHealthPolicies(Model): +class ClusterHealthPolicies(msrest.serialization.Model): """Health policies to evaluate cluster health. 
- :param application_health_policy_map: Defines a map that contains specific - application health policies for different applications. - Each entry specifies as key the application name and as value an - ApplicationHealthPolicy used to evaluate the application health. - If an application is not specified in the map, the application health - evaluation uses the ApplicationHealthPolicy found in its application - manifest or the default application health policy (if no health policy is - defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific application health + policies for different applications. + Each entry specifies as key the application name and as value an ApplicationHealthPolicy used + to evaluate the application health. + If an application is not specified in the map, the application health evaluation uses the + ApplicationHealthPolicy found in its application manifest or the default application health + policy (if no health policy is defined in the manifest). The map is empty by default. :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. 
+ :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy """ _attribute_map = { @@ -5623,71 +6812,109 @@ class ClusterHealthPolicies(Model): 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, } - def __init__(self, *, application_health_policy_map=None, cluster_health_policy=None, **kwargs) -> None: + def __init__( + self, + *, + application_health_policy_map: Optional[List["ApplicationHealthPolicyMapItem"]] = None, + cluster_health_policy: Optional["ClusterHealthPolicy"] = None, + **kwargs + ): super(ClusterHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = application_health_policy_map self.cluster_health_policy = cluster_health_policy -class ClusterHealthPolicy(Model): - """Defines a health policy used to evaluate the health of the cluster or of a - cluster node. +class ClusterHealthPolicy(msrest.serialization.Model): + """Defines a health policy used to evaluate the health of the cluster or of a cluster node. - :param consider_warning_as_error: Indicates whether warnings are treated - with the same severity as errors. Default value: False . + :param consider_warning_as_error: Indicates whether warnings are treated with the same severity + as errors. :type consider_warning_as_error: bool - :param max_percent_unhealthy_nodes: The maximum allowed percentage of - unhealthy nodes before reporting an error. For example, to allow 10% of - nodes to be unhealthy, this value would be 10. - The percentage represents the maximum tolerated percentage of nodes that - can be unhealthy before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy node, - the health is evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy nodes - over the total number of nodes in the cluster. - The computation rounds up to tolerate one failure on small numbers of - nodes. Default percentage is zero. 
- In large clusters, some nodes will always be down or out for repairs, so - this percentage should be configured to tolerate that. Default value: 0 . + :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes before + reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10. + + The percentage represents the maximum tolerated percentage of nodes that can be unhealthy + before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy node, the health is + evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes over the total number + of nodes in the cluster. + The computation rounds up to tolerate one failure on small numbers of nodes. Default + percentage is zero. + + In large clusters, some nodes will always be down or out for repairs, so this percentage + should be configured to tolerate that. :type max_percent_unhealthy_nodes: int - :param max_percent_unhealthy_applications: The maximum allowed percentage - of unhealthy applications before reporting an error. For example, to allow - 10% of applications to be unhealthy, this value would be 10. - The percentage represents the maximum tolerated percentage of applications - that can be unhealthy before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy - application, the health is evaluated as Warning. - This is calculated by dividing the number of unhealthy applications over - the total number of application instances in the cluster, excluding - applications of application types that are included in the - ApplicationTypeHealthPolicyMap. - The computation rounds up to tolerate one failure on small numbers of - applications. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy + applications before reporting an error. 
For example, to allow 10% of applications to be + unhealthy, this value would be 10. + + The percentage represents the maximum tolerated percentage of applications that can be + unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy application, the health is + evaluated as Warning. + This is calculated by dividing the number of unhealthy applications over the total number of + application instances in the cluster, excluding applications of application types that are + included in the ApplicationTypeHealthPolicyMap. + The computation rounds up to tolerate one failure on small numbers of applications. Default + percentage is zero. :type max_percent_unhealthy_applications: int - :param application_type_health_policy_map: Defines a map with max - percentage unhealthy applications for specific application types. - Each entry specifies as key the application type name and as value an - integer that represents the MaxPercentUnhealthyApplications percentage - used to evaluate the applications of the specified application type. - The application type health policy map can be used during cluster health - evaluation to describe special application types. - The application types included in the map are evaluated against the - percentage specified in the map, and not with the global - MaxPercentUnhealthyApplications defined in the cluster health policy. - The applications of application types specified in the map are not counted - against the global pool of applications. - For example, if some applications of a type are critical, the cluster - administrator can add an entry to the map for that application type + :param application_type_health_policy_map: Defines a map with max percentage unhealthy + applications for specific application types. 
+ Each entry specifies as key the application type name and as value an integer that represents + the MaxPercentUnhealthyApplications percentage used to evaluate the applications of the + specified application type. + + The application type health policy map can be used during cluster health evaluation to + describe special application types. + The application types included in the map are evaluated against the percentage specified in + the map, and not with the global MaxPercentUnhealthyApplications defined in the cluster health + policy. + The applications of application types specified in the map are not counted against the global + pool of applications. + For example, if some applications of a type are critical, the cluster administrator can add an + entry to the map for that application type and assign it a value of 0% (that is, do not tolerate any failures). - All other applications can be evaluated with - MaxPercentUnhealthyApplications set to 20% to tolerate some failures out - of the thousands of application instances. - The application type health policy map is used only if the cluster - manifest enables application type health evaluation using the - configuration entry for + All other applications can be evaluated with MaxPercentUnhealthyApplications set to 20% to + tolerate some failures out of the thousands of application instances. + The application type health policy map is used only if the cluster manifest enables + application type health evaluation using the configuration entry for HealthManager/EnableApplicationTypeHealthEvaluation. :type application_type_health_policy_map: list[~azure.servicefabric.models.ApplicationTypeHealthPolicyMapItem] + :param node_type_health_policy_map: Defines a map with max percentage unhealthy nodes for + specific node types. + Each entry specifies as key the node type name and as value an integer that represents the + MaxPercentUnhealthyNodes percentage used to evaluate the nodes of the specified node type. 
+ + The node type health policy map can be used during cluster health evaluation to describe + special node types. + They are evaluated against the percentages associated with their node type name in the map. + Setting this has no impact on the global pool of nodes used for MaxPercentUnhealthyNodes. + The node type health policy map is used only if the cluster manifest enables node type health + evaluation using the configuration entry for HealthManager/EnableNodeTypeHealthEvaluation. + + For example, given a cluster with many nodes of different types, with important work hosted on + node type "SpecialNodeType" that should not tolerate any nodes down. + You can specify global MaxPercentUnhealthyNodes to 20% to tolerate some failures for all + nodes, but for the node type "SpecialNodeType", set the MaxPercentUnhealthyNodes to 0 by + setting the value in the key value pair in NodeTypeHealthPolicyMapItem. The key is the node + type name. + This way, as long as no nodes of type "SpecialNodeType" are in Error state, + even if some of the many nodes in the global pool are in Error state, but below the global + unhealthy percentage, the cluster would be evaluated to Warning. + A Warning health state does not impact cluster upgrade or other monitoring triggered by Error + health state. + But even one node of type SpecialNodeType in Error would make cluster unhealthy (in Error + rather than Warning/Ok), which triggers rollback or pauses the cluster upgrade, depending on + the upgrade configuration. + + Conversely, setting the global MaxPercentUnhealthyNodes to 0, and setting SpecialNodeType's + max percent unhealthy nodes to 100, + with one node of type SpecialNodeType in Error state would still put the cluster in an Error + state, since the global restriction is more strict in this case. 
+ :type node_type_health_policy_map: + list[~azure.servicefabric.models.NodeTypeHealthPolicyMapItem] """ _attribute_map = { @@ -5695,14 +6922,25 @@ class ClusterHealthPolicy(Model): 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'application_type_health_policy_map': {'key': 'ApplicationTypeHealthPolicyMap', 'type': '[ApplicationTypeHealthPolicyMapItem]'}, - } - - def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealthy_nodes: int=0, max_percent_unhealthy_applications: int=0, application_type_health_policy_map=None, **kwargs) -> None: + 'node_type_health_policy_map': {'key': 'NodeTypeHealthPolicyMap', 'type': '[NodeTypeHealthPolicyMapItem]'}, + } + + def __init__( + self, + *, + consider_warning_as_error: Optional[bool] = False, + max_percent_unhealthy_nodes: Optional[int] = 0, + max_percent_unhealthy_applications: Optional[int] = 0, + application_type_health_policy_map: Optional[List["ApplicationTypeHealthPolicyMapItem"]] = None, + node_type_health_policy_map: Optional[List["NodeTypeHealthPolicyMapItem"]] = None, + **kwargs + ): super(ClusterHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = consider_warning_as_error self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes self.max_percent_unhealthy_applications = max_percent_unhealthy_applications self.application_type_health_policy_map = application_type_health_policy_map + self.node_type_health_policy_map = node_type_health_policy_map class ClusterHealthReportExpiredEvent(ClusterEvent): @@ -5710,18 +6948,38 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -5734,17 +6992,16 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -5756,11 +7013,11 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -5771,8 +7028,25 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): 'source_utc_timestamp': {'key': 
'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterHealthReportExpired' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -5781,23 +7055,18 @@ def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, proper self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'ClusterHealthReportExpired' -class ClusterLoadInfo(Model): - """Information about load in a Service Fabric cluster. It holds a summary of - all metrics and their load in a cluster. +class ClusterLoadInfo(msrest.serialization.Model): + """Information about load in a Service Fabric cluster. It holds a summary of all metrics and their load in a cluster. - :param last_balancing_start_time_utc: The starting time of last resource - balancing run. - :type last_balancing_start_time_utc: datetime - :param last_balancing_end_time_utc: The end time of last resource - balancing run. 
- :type last_balancing_end_time_utc: datetime - :param load_metric_information: List that contains metrics and their load - information in this cluster. - :type load_metric_information: - list[~azure.servicefabric.models.LoadMetricInformation] + :param last_balancing_start_time_utc: The starting time of last resource balancing run. + :type last_balancing_start_time_utc: ~datetime.datetime + :param last_balancing_end_time_utc: The end time of last resource balancing run. + :type last_balancing_end_time_utc: ~datetime.datetime + :param load_metric_information: List that contains metrics and their load information in this + cluster. + :type load_metric_information: list[~azure.servicefabric.models.LoadMetricInformation] """ _attribute_map = { @@ -5806,14 +7075,21 @@ class ClusterLoadInfo(Model): 'load_metric_information': {'key': 'LoadMetricInformation', 'type': '[LoadMetricInformation]'}, } - def __init__(self, *, last_balancing_start_time_utc=None, last_balancing_end_time_utc=None, load_metric_information=None, **kwargs) -> None: + def __init__( + self, + *, + last_balancing_start_time_utc: Optional[datetime.datetime] = None, + last_balancing_end_time_utc: Optional[datetime.datetime] = None, + load_metric_information: Optional[List["LoadMetricInformation"]] = None, + **kwargs + ): super(ClusterLoadInfo, self).__init__(**kwargs) self.last_balancing_start_time_utc = last_balancing_start_time_utc self.last_balancing_end_time_utc = last_balancing_end_time_utc self.load_metric_information = load_metric_information -class ClusterManifest(Model): +class ClusterManifest(msrest.serialization.Model): """Information about the cluster manifest. :param manifest: The contents of the cluster manifest file. 
@@ -5824,7 +7100,12 @@ class ClusterManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, *, manifest: str=None, **kwargs) -> None: + def __init__( + self, + *, + manifest: Optional[str] = None, + **kwargs + ): super(ClusterManifest, self).__init__(**kwargs) self.manifest = manifest @@ -5834,18 +7115,38 @@ class ClusterNewHealthReportEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", 
"ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -5858,17 +7159,16 @@ class ClusterNewHealthReportEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -5880,11 +7180,11 @@ class ClusterNewHealthReportEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -5895,8 +7195,25 @@ class ClusterNewHealthReportEvent(ClusterEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterNewHealthReport' # type: str self.source_id = 
source_id self.property = property self.health_state = health_state @@ -5905,7 +7222,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, proper self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'ClusterNewHealthReport' class ClusterUpgradeCompletedEvent(ClusterEvent): @@ -5913,102 +7229,122 @@ class ClusterUpgradeCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of - upgrade in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in + milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + target_cluster_version: str, + overall_upgrade_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterUpgradeCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterUpgradeCompleted' # type: str self.target_cluster_version = target_cluster_version self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms - self.kind = 'ClusterUpgradeCompleted' -class ClusterUpgradeDescriptionObject(Model): +class ClusterUpgradeDescriptionObject(msrest.serialization.Model): """Represents a ServiceFabric cluster upgrade. - :param config_version: The cluster configuration version (specified in the - cluster manifest). 
+ :param config_version: The cluster configuration version (specified in the cluster manifest). :type config_version: str :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. 
When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through - the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', - 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default - value: "Default" . + :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible + values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", + "ReverseLexicographical". Default value: "Default". :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param enable_delta_health_evaluation: When true, enables delta health - evaluation rather than absolute health evaluation after completion of each - upgrade domain. + :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than + absolute health evaluation after completion of each upgrade domain. :type enable_delta_health_evaluation: bool - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. 
- :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param cluster_upgrade_health_policy: Defines a health policy used to - evaluate the health of the cluster during a cluster upgrade. + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of + the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Represents the map of application - health policies for a ServiceFabric cluster upgrade + :param application_health_policy_map: Represents the map of application health policies for a + ServiceFabric cluster upgrade. 
:type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicyMapObject """ @@ -6028,7 +7364,23 @@ class ClusterUpgradeDescriptionObject(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicyMapObject'}, } - def __init__(self, *, config_version: str=None, code_version: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, sort_order="Default", enable_delta_health_evaluation: bool=None, monitoring_policy=None, cluster_health_policy=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None: + def __init__( + self, + *, + config_version: Optional[str] = None, + code_version: Optional[str] = None, + upgrade_kind: Optional[Union[str, "UpgradeKind"]] = "Rolling", + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, + force_restart: Optional[bool] = False, + sort_order: Optional[Union[str, "UpgradeSortOrder"]] = "Default", + enable_delta_health_evaluation: Optional[bool] = None, + monitoring_policy: Optional["MonitoringPolicyDescription"] = None, + cluster_health_policy: Optional["ClusterHealthPolicy"] = None, + cluster_upgrade_health_policy: Optional["ClusterUpgradeHealthPolicyObject"] = None, + application_health_policy_map: Optional["ApplicationHealthPolicyMapObject"] = None, + **kwargs + ): super(ClusterUpgradeDescriptionObject, self).__init__(**kwargs) self.config_version = config_version self.code_version = code_version @@ -6049,33 +7401,53 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain - upgrade in milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain upgrade in + milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_state': {'required': True}, 'upgrade_domains': {'required': True}, @@ -6083,46 +7455,53 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, 'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'}, 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, 
upgrade_state: str, upgrade_domains: str, upgrade_domain_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + target_cluster_version: str, + upgrade_state: str, + upgrade_domains: str, + upgrade_domain_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterUpgradeDomainCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterUpgradeDomainCompleted' # type: str self.target_cluster_version = target_cluster_version self.upgrade_state = upgrade_state self.upgrade_domains = upgrade_domains self.upgrade_domain_elapsed_time_in_ms = upgrade_domain_elapsed_time_in_ms - self.kind = 'ClusterUpgradeDomainCompleted' -class ClusterUpgradeHealthPolicyObject(Model): - """Defines a health policy used to evaluate the health of the cluster during a - cluster upgrade. +class ClusterUpgradeHealthPolicyObject(msrest.serialization.Model): + """Defines a health policy used to evaluate the health of the cluster during a cluster upgrade. - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage - of nodes health degradation allowed during cluster upgrades. The delta is - measured between the state of the nodes at the beginning of upgrade and - the state of the nodes at the time of the health evaluation. The check is - performed after every upgrade domain upgrade completion to make sure the - global state of the cluster is within tolerated limits. The default value - is 10%. + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of nodes health + degradation allowed during cluster upgrades. 
The delta is measured between the state of the + nodes at the beginning of upgrade and the state of the nodes at the time of the health + evaluation. The check is performed after every upgrade domain upgrade completion to make sure + the global state of the cluster is within tolerated limits. The default value is 10%. :type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum - allowed percentage of upgrade domain nodes health degradation allowed - during cluster upgrades. The delta is measured between the state of the - upgrade domain nodes at the beginning of upgrade and the state of the - upgrade domain nodes at the time of the health evaluation. The check is - performed after every upgrade domain upgrade completion for all completed - upgrade domains to make sure the state of the upgrade domains is within - tolerated limits. The default value is 15%. + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of + upgrade domain nodes health degradation allowed during cluster upgrades. The delta is measured + between the state of the upgrade domain nodes at the beginning of upgrade and the state of the + upgrade domain nodes at the time of the health evaluation. The check is performed after every + upgrade domain upgrade completion for all completed upgrade domains to make sure the state of + the upgrade domains is within tolerated limits. The default value is 15%. 
:type max_percent_upgrade_domain_delta_unhealthy_nodes: int """ @@ -6136,63 +7515,63 @@ class ClusterUpgradeHealthPolicyObject(Model): 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, } - def __init__(self, *, max_percent_delta_unhealthy_nodes: int=None, max_percent_upgrade_domain_delta_unhealthy_nodes: int=None, **kwargs) -> None: + def __init__( + self, + *, + max_percent_delta_unhealthy_nodes: Optional[int] = None, + max_percent_upgrade_domain_delta_unhealthy_nodes: Optional[int] = None, + **kwargs + ): super(ClusterUpgradeHealthPolicyObject, self).__init__(**kwargs) self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes self.max_percent_upgrade_domain_delta_unhealthy_nodes = max_percent_upgrade_domain_delta_unhealthy_nodes -class ClusterUpgradeProgressObject(Model): +class ClusterUpgradeProgressObject(msrest.serialization.Model): """Information about a cluster upgrade. :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param config_version: The cluster configuration version (specified in the - cluster manifest). + :param config_version: The cluster configuration version (specified in the cluster manifest). :type config_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values - include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', - 'RollingForwardPending', 'RollingForwardInProgress', - 'RollingForwardCompleted', 'Failed' + :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", + "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", + "RollingForwardInProgress", "RollingForwardCompleted", "Failed". 
:type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be - processed. + :param next_upgrade_domain: The name of the next upgrade domain to be processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Represents a ServiceFabric cluster upgrade - :type upgrade_description: - ~azure.servicefabric.models.ClusterUpgradeDescriptionObject - :param upgrade_duration_in_milliseconds: The estimated elapsed time spent - processing the current overall upgrade. + :param upgrade_description: Represents a ServiceFabric cluster upgrade. + :type upgrade_description: ~azure.servicefabric.models.ClusterUpgradeDescriptionObject + :param upgrade_duration_in_milliseconds: The estimated elapsed time spent processing the + current overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time - spent processing the current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time spent processing the + current upgrade domain. :type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in - the current aggregated health state. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current - in-progress upgrade domain. + :param unhealthy_evaluations: List of health evaluations that resulted in the current + aggregated health state. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current in-progress upgrade + domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo :param start_timestamp_utc: The start time of the upgrade in UTC. :type start_timestamp_utc: str :param failure_timestamp_utc: The failure time of the upgrade in UTC. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in - FailureAction being executed. Possible values include: 'None', - 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', - 'OverallUpgradeTimeout' + :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being + executed. Possible values include: "None", "Interrupted", "HealthCheck", + "UpgradeDomainTimeout", "OverallUpgradeTimeout". :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: The detailed upgrade progress - for nodes in the current upgrade domain at the point of failure. + :param upgrade_domain_progress_at_failure: The detailed upgrade progress for nodes in the + current upgrade domain at the point of failure. 
:type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailedUpgradeDomainProgressObject """ @@ -6215,7 +7594,26 @@ class ClusterUpgradeProgressObject(Model): 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailedUpgradeDomainProgressObject'}, } - def __init__(self, *, code_version: str=None, config_version: str=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain: str=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds: str=None, upgrade_domain_duration_in_milliseconds: str=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, **kwargs) -> None: + def __init__( + self, + *, + code_version: Optional[str] = None, + config_version: Optional[str] = None, + upgrade_domains: Optional[List["UpgradeDomainInfo"]] = None, + upgrade_state: Optional[Union[str, "UpgradeState"]] = None, + next_upgrade_domain: Optional[str] = None, + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + upgrade_description: Optional["ClusterUpgradeDescriptionObject"] = None, + upgrade_duration_in_milliseconds: Optional[str] = None, + upgrade_domain_duration_in_milliseconds: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + current_upgrade_domain_progress: Optional["CurrentUpgradeDomainProgressInfo"] = None, + start_timestamp_utc: Optional[str] = None, + failure_timestamp_utc: Optional[str] = None, + failure_reason: Optional[Union[str, "FailureReason"]] = None, + upgrade_domain_progress_at_failure: Optional["FailedUpgradeDomainProgressObject"] = None, + **kwargs + ): super(ClusterUpgradeProgressObject, self).__init__(**kwargs) self.code_version = code_version self.config_version = config_version @@ -6239,53 +7637,84 @@ class 
ClusterUpgradeRollbackCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + 
"ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of - upgrade in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in + milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + target_cluster_version: str, + failure_reason: str, + overall_upgrade_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterUpgradeRollbackCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterUpgradeRollbackCompleted' # type: str self.target_cluster_version = target_cluster_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms - self.kind = 'ClusterUpgradeRollbackCompleted' class ClusterUpgradeRollbackStartedEvent(ClusterEvent): @@ -6293,53 +7722,84 @@ class 
ClusterUpgradeRollbackStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + 
"ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of - upgrade in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in + milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + target_cluster_version: str, + failure_reason: str, + overall_upgrade_elapsed_time_in_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterUpgradeRollbackStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterUpgradeRollbackStarted' # type: str self.target_cluster_version = target_cluster_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms - self.kind = 'ClusterUpgradeRollbackStarted' class ClusterUpgradeStartedEvent(ClusterEvent): @@ -6347,18 +7807,38 @@ class 
ClusterUpgradeStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + 
"ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param current_cluster_version: Required. Current Cluster version. :type current_cluster_version: str :param target_cluster_version: Required. Target Cluster version. @@ -6372,9 +7852,9 @@ class ClusterUpgradeStartedEvent(ClusterEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'current_cluster_version': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_type': {'required': True}, @@ -6383,11 +7863,11 @@ class ClusterUpgradeStartedEvent(ClusterEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'current_cluster_version': {'key': 'CurrentClusterVersion', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_type': {'key': 'UpgradeType', 'type': 'str'}, @@ -6395,17 +7875,30 @@ class ClusterUpgradeStartedEvent(ClusterEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def __init__(self, *, 
event_instance_id: str, time_stamp, current_cluster_version: str, target_cluster_version: str, upgrade_type: str, rolling_upgrade_mode: str, failure_action: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + current_cluster_version: str, + target_cluster_version: str, + upgrade_type: str, + rolling_upgrade_mode: str, + failure_action: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ClusterUpgradeStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterUpgradeStarted' # type: str self.current_cluster_version = current_cluster_version self.target_cluster_version = target_cluster_version self.upgrade_type = upgrade_type self.rolling_upgrade_mode = rolling_upgrade_mode self.failure_action = failure_action - self.kind = 'ClusterUpgradeStarted' -class ClusterVersion(Model): +class ClusterVersion(msrest.serialization.Model): """The cluster version. :param version: The Service Fabric cluster runtime version. @@ -6416,38 +7909,39 @@ class ClusterVersion(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, *, version: str=None, **kwargs) -> None: + def __init__( + self, + *, + version: Optional[str] = None, + **kwargs + ): super(ClusterVersion, self).__init__(**kwargs) self.version = version -class CodePackageEntryPoint(Model): - """Information about setup or main entry point of a code package deployed on a - Service Fabric node. +class CodePackageEntryPoint(msrest.serialization.Model): + """Information about setup or main entry point of a code package deployed on a Service Fabric node. - :param entry_point_location: The location of entry point executable on the - node. + :param entry_point_location: The location of entry point executable on the node. 
:type entry_point_location: str :param process_id: The process ID of the entry point. :type process_id: str - :param run_as_user_name: The user name under which entry point executable - is run on the node. + :param run_as_user_name: The user name under which entry point executable is run on the node. :type run_as_user_name: str - :param code_package_entry_point_statistics: Statistics about setup or main - entry point of a code package deployed on a Service Fabric node. + :param code_package_entry_point_statistics: Statistics about setup or main entry point of a + code package deployed on a Service Fabric node. :type code_package_entry_point_statistics: ~azure.servicefabric.models.CodePackageEntryPointStatistics - :param status: Specifies the status of the code package entry point - deployed on a Service Fabric node. Possible values include: 'Invalid', - 'Pending', 'Starting', 'Started', 'Stopping', 'Stopped' + :param status: Specifies the status of the code package entry point deployed on a Service + Fabric node. Possible values include: "Invalid", "Pending", "Starting", "Started", "Stopping", + "Stopped". :type status: str or ~azure.servicefabric.models.EntryPointStatus - :param next_activation_time: The time (in UTC) when the entry point - executable will be run next. - :type next_activation_time: datetime - :param instance_id: The instance ID for current running entry point. For a - code package setup entry point (if specified) runs first and after it - finishes main entry point is started. Each time entry point executable is - run, its instance id will change. + :param next_activation_time: The time (in UTC) when the entry point executable will be run + next. + :type next_activation_time: ~datetime.datetime + :param instance_id: The instance ID for current running entry point. For a code package setup + entry point (if specified) runs first and after it finishes main entry point is started. Each + time entry point executable is run, its instance id will change. 
:type instance_id: str """ @@ -6461,7 +7955,18 @@ class CodePackageEntryPoint(Model): 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, *, entry_point_location: str=None, process_id: str=None, run_as_user_name: str=None, code_package_entry_point_statistics=None, status=None, next_activation_time=None, instance_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + entry_point_location: Optional[str] = None, + process_id: Optional[str] = None, + run_as_user_name: Optional[str] = None, + code_package_entry_point_statistics: Optional["CodePackageEntryPointStatistics"] = None, + status: Optional[Union[str, "EntryPointStatus"]] = None, + next_activation_time: Optional[datetime.datetime] = None, + instance_id: Optional[str] = None, + **kwargs + ): super(CodePackageEntryPoint, self).__init__(**kwargs) self.entry_point_location = entry_point_location self.process_id = process_id @@ -6472,39 +7977,35 @@ def __init__(self, *, entry_point_location: str=None, process_id: str=None, run_ self.instance_id = instance_id -class CodePackageEntryPointStatistics(Model): - """Statistics about setup or main entry point of a code package deployed on a - Service Fabric node. +class CodePackageEntryPointStatistics(msrest.serialization.Model): + """Statistics about setup or main entry point of a code package deployed on a Service Fabric node. :param last_exit_code: The last exit code of the entry point. :type last_exit_code: str - :param last_activation_time: The last time (in UTC) when Service Fabric - attempted to run the entry point. - :type last_activation_time: datetime - :param last_exit_time: The last time (in UTC) when the entry point - finished running. - :type last_exit_time: datetime - :param last_successful_activation_time: The last time (in UTC) when the - entry point ran successfully. - :type last_successful_activation_time: datetime - :param last_successful_exit_time: The last time (in UTC) when the entry - point finished running gracefully. 
- :type last_successful_exit_time: datetime + :param last_activation_time: The last time (in UTC) when Service Fabric attempted to run the + entry point. + :type last_activation_time: ~datetime.datetime + :param last_exit_time: The last time (in UTC) when the entry point finished running. + :type last_exit_time: ~datetime.datetime + :param last_successful_activation_time: The last time (in UTC) when the entry point ran + successfully. + :type last_successful_activation_time: ~datetime.datetime + :param last_successful_exit_time: The last time (in UTC) when the entry point finished running + gracefully. + :type last_successful_exit_time: ~datetime.datetime :param activation_count: Number of times the entry point has run. :type activation_count: str - :param activation_failure_count: Number of times the entry point failed to - run. + :param activation_failure_count: Number of times the entry point failed to run. :type activation_failure_count: str - :param continuous_activation_failure_count: Number of times the entry - point continuously failed to run. + :param continuous_activation_failure_count: Number of times the entry point continuously failed + to run. :type continuous_activation_failure_count: str :param exit_count: Number of times the entry point finished running. :type exit_count: str - :param exit_failure_count: Number of times the entry point failed to exit - gracefully. + :param exit_failure_count: Number of times the entry point failed to exit gracefully. :type exit_failure_count: str - :param continuous_exit_failure_count: Number of times the entry point - continuously failed to exit gracefully. + :param continuous_exit_failure_count: Number of times the entry point continuously failed to + exit gracefully. 
:type continuous_exit_failure_count: str """ @@ -6522,7 +8023,22 @@ class CodePackageEntryPointStatistics(Model): 'continuous_exit_failure_count': {'key': 'ContinuousExitFailureCount', 'type': 'str'}, } - def __init__(self, *, last_exit_code: str=None, last_activation_time=None, last_exit_time=None, last_successful_activation_time=None, last_successful_exit_time=None, activation_count: str=None, activation_failure_count: str=None, continuous_activation_failure_count: str=None, exit_count: str=None, exit_failure_count: str=None, continuous_exit_failure_count: str=None, **kwargs) -> None: + def __init__( + self, + *, + last_exit_code: Optional[str] = None, + last_activation_time: Optional[datetime.datetime] = None, + last_exit_time: Optional[datetime.datetime] = None, + last_successful_activation_time: Optional[datetime.datetime] = None, + last_successful_exit_time: Optional[datetime.datetime] = None, + activation_count: Optional[str] = None, + activation_failure_count: Optional[str] = None, + continuous_activation_failure_count: Optional[str] = None, + exit_count: Optional[str] = None, + exit_failure_count: Optional[str] = None, + continuous_exit_failure_count: Optional[str] = None, + **kwargs + ): super(CodePackageEntryPointStatistics, self).__init__(**kwargs) self.last_exit_code = last_exit_code self.last_activation_time = last_activation_time @@ -6537,20 +8053,17 @@ def __init__(self, *, last_exit_code: str=None, last_activation_time=None, last_ self.continuous_exit_failure_count = continuous_exit_failure_count -class ComposeDeploymentStatusInfo(Model): +class ComposeDeploymentStatusInfo(msrest.serialization.Model): """Information about a Service Fabric compose deployment. :param name: The name of the deployment. :type name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. 
:type application_name: str - :param status: The status of the compose deployment. Possible values - include: 'Invalid', 'Provisioning', 'Creating', 'Ready', 'Unprovisioning', - 'Deleting', 'Failed', 'Upgrading' + :param status: The status of the compose deployment. Possible values include: "Invalid", + "Provisioning", "Creating", "Ready", "Unprovisioning", "Deleting", "Failed", "Upgrading". :type status: str or ~azure.servicefabric.models.ComposeDeploymentStatus - :param status_details: The status details of compose deployment including - failure message. + :param status_details: The status details of compose deployment including failure message. :type status_details: str """ @@ -6561,7 +8074,15 @@ class ComposeDeploymentStatusInfo(Model): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__(self, *, name: str=None, application_name: str=None, status=None, status_details: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + application_name: Optional[str] = None, + status: Optional[Union[str, "ComposeDeploymentStatus"]] = None, + status_details: Optional[str] = None, + **kwargs + ): super(ComposeDeploymentStatusInfo, self).__init__(**kwargs) self.name = name self.application_name = application_name @@ -6569,48 +8090,40 @@ def __init__(self, *, name: str=None, application_name: str=None, status=None, s self.status_details = status_details -class ComposeDeploymentUpgradeDescription(Model): +class ComposeDeploymentUpgradeDescription(msrest.serialization.Model): """Describes the parameters for a compose deployment upgrade. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file - that describes the deployment to create. + :param compose_file_content: Required. The content of the compose file that describes the + deployment to create. 
:type compose_file_content: str - :param registry_credential: Credential information to connect to container - registry. + :param registry_credential: Credential information to connect to container registry. :type registry_credential: ~azure.servicefabric.models.RegistryCredential - :param upgrade_kind: Required. The kind of upgrade out of the following - possible values. Possible values include: 'Invalid', 'Rolling'. Default - value: "Rolling" . + :param upgrade_kind: Required. The kind of upgrade out of the following possible values. + Possible values include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. 
When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. 
+ :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy """ _validation = { @@ -6631,7 +8144,20 @@ class ComposeDeploymentUpgradeDescription(Model): 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, *, deployment_name: str, compose_file_content: str, registry_credential=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, monitoring_policy=None, application_health_policy=None, **kwargs) -> None: + def __init__( + self, + *, + deployment_name: str, + compose_file_content: str, + upgrade_kind: Union[str, "UpgradeKind"] = "Rolling", + registry_credential: Optional["RegistryCredential"] = None, + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, + force_restart: Optional[bool] = False, + monitoring_policy: Optional["MonitoringPolicyDescription"] = None, + application_health_policy: Optional["ApplicationHealthPolicy"] = None, + **kwargs + ): super(ComposeDeploymentUpgradeDescription, self).__init__(**kwargs) self.deployment_name = deployment_name self.compose_file_content = compose_file_content @@ -6644,92 +8170,78 @@ def __init__(self, *, deployment_name: str, compose_file_content: str, registry_ self.application_health_policy = application_health_policy -class ComposeDeploymentUpgradeProgressInfo(Model): +class ComposeDeploymentUpgradeProgressInfo(msrest.serialization.Model): """Describes the parameters for a compose deployment upgrade. :param deployment_name: The name of the target deployment. :type deployment_name: str - :param application_name: The name of the target application, including the - 'fabric:' URI scheme. + :param application_name: The name of the target application, including the 'fabric:' URI + scheme. 
:type application_name: str - :param upgrade_state: The state of the compose deployment upgrade. - Possible values include: 'Invalid', 'ProvisioningTarget', - 'RollingForwardInProgress', 'RollingForwardPending', - 'UnprovisioningCurrent', 'RollingForwardCompleted', - 'RollingBackInProgress', 'UnprovisioningTarget', 'RollingBackCompleted', - 'Failed' - :type upgrade_state: str or - ~azure.servicefabric.models.ComposeDeploymentUpgradeState - :param upgrade_status_details: Additional detailed information about the - status of the pending upgrade. + :param upgrade_state: The state of the compose deployment upgrade. Possible values include: + "Invalid", "ProvisioningTarget", "RollingForwardInProgress", "RollingForwardPending", + "UnprovisioningCurrent", "RollingForwardCompleted", "RollingBackInProgress", + "UnprovisioningTarget", "RollingBackCompleted", "Failed". + :type upgrade_state: str or ~azure.servicefabric.models.ComposeDeploymentUpgradeState + :param upgrade_status_details: Additional detailed information about the status of the pending + upgrade. :type upgrade_status_details: str - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. 
Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate - the health of an application or one of its children entities. 
- :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param target_application_type_version: The target application type - version (found in the application manifest) for the application upgrade. + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. + :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate the health of an + application or one of its children entities. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param target_application_type_version: The target application type version (found in the + application manifest) for the application upgrade. :type target_application_type_version: str - :param upgrade_duration: The estimated amount of time that the overall - upgrade elapsed. It is first interpreted as a string representing an ISO - 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. + :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type upgrade_duration: str - :param current_upgrade_domain_duration: The estimated amount of time spent - processing current Upgrade Domain. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param current_upgrade_domain_duration: The estimated amount of time spent processing current + Upgrade Domain. It is first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of milliseconds. 
:type current_upgrade_domain_duration: str - :param application_unhealthy_evaluations: List of health evaluations that - resulted in the current aggregated health state. + :param application_unhealthy_evaluations: List of health evaluations that resulted in the + current aggregated health state. :type application_unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current - in-progress upgrade domain. + :param current_upgrade_domain_progress: Information about the current in-progress upgrade + domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade - started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade - failed and FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and + FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in - FailureAction being executed. Possible values include: 'None', - 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', - 'OverallUpgradeTimeout' + :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being + executed. Possible values include: "None", "Interrupted", "HealthCheck", + "UpgradeDomainTimeout", "OverallUpgradeTimeout". :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade - domain progress at the time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the + time of upgrade failure. 
:type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param application_upgrade_status_details: Additional details of - application upgrade including failure message. + :param application_upgrade_status_details: Additional details of application upgrade including + failure message. :type application_upgrade_status_details: str """ @@ -6756,7 +8268,31 @@ class ComposeDeploymentUpgradeProgressInfo(Model): 'application_upgrade_status_details': {'key': 'ApplicationUpgradeStatusDetails', 'type': 'str'}, } - def __init__(self, *, deployment_name: str=None, application_name: str=None, upgrade_state=None, upgrade_status_details: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", force_restart: bool=None, upgrade_replica_set_check_timeout_in_seconds: int=None, monitoring_policy=None, application_health_policy=None, target_application_type_version: str=None, upgrade_duration: str=None, current_upgrade_domain_duration: str=None, application_unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, application_upgrade_status_details: str=None, **kwargs) -> None: + def __init__( + self, + *, + deployment_name: Optional[str] = None, + application_name: Optional[str] = None, + upgrade_state: Optional[Union[str, "ComposeDeploymentUpgradeState"]] = None, + upgrade_status_details: Optional[str] = None, + upgrade_kind: Optional[Union[str, "UpgradeKind"]] = "Rolling", + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + force_restart: Optional[bool] = False, + upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, + monitoring_policy: Optional["MonitoringPolicyDescription"] = None, + application_health_policy: Optional["ApplicationHealthPolicy"] = None, + target_application_type_version: Optional[str] = None, + upgrade_duration: 
Optional[str] = "PT0H2M0S", + current_upgrade_domain_duration: Optional[str] = "PT0H2M0S", + application_unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + current_upgrade_domain_progress: Optional["CurrentUpgradeDomainProgressInfo"] = None, + start_timestamp_utc: Optional[str] = None, + failure_timestamp_utc: Optional[str] = None, + failure_reason: Optional[Union[str, "FailureReason"]] = None, + upgrade_domain_progress_at_failure: Optional["FailureUpgradeDomainProgressInfo"] = None, + application_upgrade_status_details: Optional[str] = None, + **kwargs + ): super(ComposeDeploymentUpgradeProgressInfo, self).__init__(**kwargs) self.deployment_name = deployment_name self.application_name = application_name @@ -6780,23 +8316,21 @@ def __init__(self, *, deployment_name: str=None, application_name: str=None, upg self.application_upgrade_status_details = application_upgrade_status_details -class ConfigParameterOverride(Model): +class ConfigParameterOverride(msrest.serialization.Model): """Information about a configuration parameter override. All required parameters must be populated in order to send to Azure. - :param section_name: Required. Name of the section for the parameter - override. + :param section_name: Required. Name of the section for the parameter override. :type section_name: str - :param parameter_name: Required. Name of the parameter that has been - overridden. + :param parameter_name: Required. Name of the parameter that has been overridden. :type parameter_name: str :param parameter_value: Required. Value of the overridden parameter. :type parameter_value: str :param timeout: The duration until config override is considered as valid. - :type timeout: timedelta - :param persist_across_upgrade: A value that indicates whether config - override will be removed on upgrade or will still be considered as valid. 
+ :type timeout: ~datetime.timedelta + :param persist_across_upgrade: A value that indicates whether config override will be removed + on upgrade or will still be considered as valid. :type persist_across_upgrade: bool """ @@ -6814,7 +8348,16 @@ class ConfigParameterOverride(Model): 'persist_across_upgrade': {'key': 'PersistAcrossUpgrade', 'type': 'bool'}, } - def __init__(self, *, section_name: str, parameter_name: str, parameter_value: str, timeout=None, persist_across_upgrade: bool=None, **kwargs) -> None: + def __init__( + self, + *, + section_name: str, + parameter_name: str, + parameter_value: str, + timeout: Optional[datetime.timedelta] = None, + persist_across_upgrade: Optional[bool] = None, + **kwargs + ): super(ConfigParameterOverride, self).__init__(**kwargs) self.section_name = section_name self.parameter_name = parameter_name @@ -6823,19 +8366,19 @@ def __init__(self, *, section_name: str, parameter_name: str, parameter_value: s self.persist_across_upgrade = persist_across_upgrade -class ContainerApiRequestBody(Model): +class ContainerApiRequestBody(msrest.serialization.Model): """parameters for making container API call. All required parameters must be populated in order to send to Azure. - :param http_verb: HTTP verb of container REST API, defaults to "GET" + :param http_verb: HTTP verb of container REST API, defaults to "GET". :type http_verb: str - :param uri_path: Required. URI path of container REST API + :param uri_path: Required. URI path of container REST API. :type uri_path: str - :param content_type: Content type of container REST API request, defaults - to "application/json" + :param content_type: Content type of container REST API request, defaults to + "application/json". :type content_type: str - :param body: HTTP request body of container REST API + :param body: HTTP request body of container REST API. 
:type body: str """ @@ -6850,7 +8393,15 @@ class ContainerApiRequestBody(Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__(self, *, uri_path: str, http_verb: str=None, content_type: str=None, body: str=None, **kwargs) -> None: + def __init__( + self, + *, + uri_path: str, + http_verb: Optional[str] = None, + content_type: Optional[str] = None, + body: Optional[str] = None, + **kwargs + ): super(ContainerApiRequestBody, self).__init__(**kwargs) self.http_verb = http_verb self.uri_path = uri_path @@ -6858,7 +8409,7 @@ def __init__(self, *, uri_path: str, http_verb: str=None, content_type: str=None self.body = body -class ContainerApiResponse(Model): +class ContainerApiResponse(msrest.serialization.Model): """Response body that wraps container API result. All required parameters must be populated in order to send to Azure. @@ -6875,24 +8426,28 @@ class ContainerApiResponse(Model): 'container_api_result': {'key': 'ContainerApiResult', 'type': 'ContainerApiResult'}, } - def __init__(self, *, container_api_result, **kwargs) -> None: + def __init__( + self, + *, + container_api_result: "ContainerApiResult", + **kwargs + ): super(ContainerApiResponse, self).__init__(**kwargs) self.container_api_result = container_api_result -class ContainerApiResult(Model): +class ContainerApiResult(msrest.serialization.Model): """Container API result. All required parameters must be populated in order to send to Azure. - :param status: Required. HTTP status code returned by the target container - API + :param status: Required. HTTP status code returned by the target container API. :type status: int - :param content_type: HTTP content type + :param content_type: HTTP content type. :type content_type: str - :param content_encoding: HTTP content encoding + :param content_encoding: HTTP content encoding. :type content_encoding: str - :param body: container API result body + :param body: container API result body. 
:type body: str """ @@ -6907,7 +8462,15 @@ class ContainerApiResult(Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__(self, *, status: int, content_type: str=None, content_encoding: str=None, body: str=None, **kwargs) -> None: + def __init__( + self, + *, + status: int, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + body: Optional[str] = None, + **kwargs + ): super(ContainerApiResult, self).__init__(**kwargs) self.status = status self.content_type = content_type @@ -6915,11 +8478,10 @@ def __init__(self, *, status: int, content_type: str=None, content_encoding: str self.body = body -class ContainerCodePackageProperties(Model): +class ContainerCodePackageProperties(msrest.serialization.Model): """Describes a container and its runtime properties. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. @@ -6928,21 +8490,16 @@ class ContainerCodePackageProperties(Model): :param image: Required. The Container image to use. :type image: str :param image_registry_credential: Image registry credential. - :type image_registry_credential: - ~azure.servicefabric.models.ImageRegistryCredential + :type image_registry_credential: ~azure.servicefabric.models.ImageRegistryCredential :param entry_point: Override for the default entry point in the container. :type entry_point: str - :param commands: Command array to execute within the container in exec - form. + :param commands: Command array to execute within the container in exec form. :type commands: list[str] - :param environment_variables: The environment variables to set in this - container - :type environment_variables: - list[~azure.servicefabric.models.EnvironmentVariable] - :param settings: The settings to set in this container. 
The setting file - path can be fetched from environment variable "Fabric_SettingPath". The - path for Windows container is "C:\\\\secrets". The path for Linux - container is "/var/secrets". + :param environment_variables: The environment variables to set in this container. + :type environment_variables: list[~azure.servicefabric.models.EnvironmentVariable] + :param settings: The settings to set in this container. The setting file path can be fetched + from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". + The path for Linux container is "/var/secrets". :type settings: list[~azure.servicefabric.models.Setting] :param labels: The labels to set in this container. :type labels: list[~azure.servicefabric.models.ContainerLabel] @@ -6950,26 +8507,24 @@ class ContainerCodePackageProperties(Model): :type endpoints: list[~azure.servicefabric.models.EndpointProperties] :param resources: Required. The resources required by this container. :type resources: ~azure.servicefabric.models.ResourceRequirements - :param volume_refs: Volumes to be attached to the container. The lifetime - of these volumes is independent of the application's lifetime. + :param volume_refs: Volumes to be attached to the container. The lifetime of these volumes is + independent of the application's lifetime. :type volume_refs: list[~azure.servicefabric.models.VolumeReference] - :param volumes: Volumes to be attached to the container. The lifetime of - these volumes is scoped to the application's lifetime. + :param volumes: Volumes to be attached to the container. The lifetime of these volumes is + scoped to the application's lifetime. :type volumes: list[~azure.servicefabric.models.ApplicationScopedVolume] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef - :param reliable_collections_refs: A list of ReliableCollection resources - used by this particular code package. 
Please refer to - ReliableCollectionsRef for more details. - :type reliable_collections_refs: - list[~azure.servicefabric.models.ReliableCollectionsRef] + :param reliable_collections_refs: A list of ReliableCollection resources used by this + particular code package. Please refer to ReliableCollectionsRef for more details. + :type reliable_collections_refs: list[~azure.servicefabric.models.ReliableCollectionsRef] :ivar instance_view: Runtime information of a container instance. :vartype instance_view: ~azure.servicefabric.models.ContainerInstanceView - :param liveness_probe: An array of liveness probes for a code package. It - determines when to restart a code package. + :param liveness_probe: An array of liveness probes for a code package. It determines when to + restart a code package. :type liveness_probe: list[~azure.servicefabric.models.Probe] - :param readiness_probe: An array of readiness probes for a code package. - It determines when to unpublish an endpoint. + :param readiness_probe: An array of readiness probes for a code package. It determines when to + unpublish an endpoint. 
:type readiness_probe: list[~azure.servicefabric.models.Probe] """ @@ -7000,7 +8555,27 @@ class ContainerCodePackageProperties(Model): 'readiness_probe': {'key': 'readinessProbe', 'type': '[Probe]'}, } - def __init__(self, *, name: str, image: str, resources, image_registry_credential=None, entry_point: str=None, commands=None, environment_variables=None, settings=None, labels=None, endpoints=None, volume_refs=None, volumes=None, diagnostics=None, reliable_collections_refs=None, liveness_probe=None, readiness_probe=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + image: str, + resources: "ResourceRequirements", + image_registry_credential: Optional["ImageRegistryCredential"] = None, + entry_point: Optional[str] = None, + commands: Optional[List[str]] = None, + environment_variables: Optional[List["EnvironmentVariable"]] = None, + settings: Optional[List["Setting"]] = None, + labels: Optional[List["ContainerLabel"]] = None, + endpoints: Optional[List["EndpointProperties"]] = None, + volume_refs: Optional[List["VolumeReference"]] = None, + volumes: Optional[List["ApplicationScopedVolume"]] = None, + diagnostics: Optional["DiagnosticsRef"] = None, + reliable_collections_refs: Optional[List["ReliableCollectionsRef"]] = None, + liveness_probe: Optional[List["Probe"]] = None, + readiness_probe: Optional[List["Probe"]] = None, + **kwargs + ): super(ContainerCodePackageProperties, self).__init__(**kwargs) self.name = name self.image = image @@ -7021,7 +8596,7 @@ def __init__(self, *, name: str, image: str, resources, image_registry_credentia self.readiness_probe = readiness_probe -class ContainerEvent(Model): +class ContainerEvent(msrest.serialization.Model): """A container event. :param name: The name of the container event. @@ -7032,7 +8607,7 @@ class ContainerEvent(Model): :type first_timestamp: str :param last_timestamp: Date/time of the last event. :type last_timestamp: str - :param message: The event message + :param message: The event message. 
:type message: str :param type: The event type. :type type: str @@ -7047,7 +8622,17 @@ class ContainerEvent(Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, *, name: str=None, count: int=None, first_timestamp: str=None, last_timestamp: str=None, message: str=None, type: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + count: Optional[int] = None, + first_timestamp: Optional[str] = None, + last_timestamp: Optional[str] = None, + message: Optional[str] = None, + type: Optional[str] = None, + **kwargs + ): super(ContainerEvent, self).__init__(**kwargs) self.name = name self.count = count @@ -7062,44 +8647,71 @@ class ContainerInstanceEvent(FabricEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ContainerInstanceEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ContainerInstanceEvent' + self.kind = 'ContainerInstanceEvent' # type: str -class ContainerInstanceView(Model): +class ContainerInstanceView(msrest.serialization.Model): """Runtime information of a container instance. - :param restart_count: The number of times the container has been - restarted. + :param restart_count: The number of times the container has been restarted. :type restart_count: int :param current_state: Current container instance state. 
:type current_state: ~azure.servicefabric.models.ContainerState @@ -7116,7 +8728,15 @@ class ContainerInstanceView(Model): 'events': {'key': 'events', 'type': '[ContainerEvent]'}, } - def __init__(self, *, restart_count: int=None, current_state=None, previous_state=None, events=None, **kwargs) -> None: + def __init__( + self, + *, + restart_count: Optional[int] = None, + current_state: Optional["ContainerState"] = None, + previous_state: Optional["ContainerState"] = None, + events: Optional[List["ContainerEvent"]] = None, + **kwargs + ): super(ContainerInstanceView, self).__init__(**kwargs) self.restart_count = restart_count self.current_state = current_state @@ -7124,7 +8744,7 @@ def __init__(self, *, restart_count: int=None, current_state=None, previous_stat self.events = events -class ContainerLabel(Model): +class ContainerLabel(msrest.serialization.Model): """Describes a container label. All required parameters must be populated in order to send to Azure. @@ -7145,13 +8765,19 @@ class ContainerLabel(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, *, name: str, value: str, **kwargs) -> None: + def __init__( + self, + *, + name: str, + value: str, + **kwargs + ): super(ContainerLabel, self).__init__(**kwargs) self.name = name self.value = value -class ContainerLogs(Model): +class ContainerLogs(msrest.serialization.Model): """Container logs. :param content: Container logs. @@ -7162,22 +8788,27 @@ class ContainerLogs(Model): 'content': {'key': 'Content', 'type': 'str'}, } - def __init__(self, *, content: str=None, **kwargs) -> None: + def __init__( + self, + *, + content: Optional[str] = None, + **kwargs + ): super(ContainerLogs, self).__init__(**kwargs) self.content = content -class ContainerState(Model): +class ContainerState(msrest.serialization.Model): """The container state. - :param state: The state of this container + :param state: The state of this container. 
:type state: str :param start_time: Date/time when the container state started. - :type start_time: datetime + :type start_time: ~datetime.datetime :param exit_code: The container exit code. :type exit_code: str :param finish_time: Date/time when the container state finished. - :type finish_time: datetime + :type finish_time: ~datetime.datetime :param detail_status: Human-readable status of this state. :type detail_status: str """ @@ -7190,7 +8821,16 @@ class ContainerState(Model): 'detail_status': {'key': 'detailStatus', 'type': 'str'}, } - def __init__(self, *, state: str=None, start_time=None, exit_code: str=None, finish_time=None, detail_status: str=None, **kwargs) -> None: + def __init__( + self, + *, + state: Optional[str] = None, + start_time: Optional[datetime.datetime] = None, + exit_code: Optional[str] = None, + finish_time: Optional[datetime.datetime] = None, + detail_status: Optional[str] = None, + **kwargs + ): super(ContainerState, self).__init__(**kwargs) self.state = state self.start_time = start_time @@ -7199,18 +8839,17 @@ def __init__(self, *, state: str=None, start_time=None, exit_code: str=None, fin self.detail_status = detail_status -class CreateComposeDeploymentDescription(Model): +class CreateComposeDeploymentDescription(msrest.serialization.Model): """Defines description for creating a Service Fabric compose deployment. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file - that describes the deployment to create. + :param compose_file_content: Required. The content of the compose file that describes the + deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container - registry. + :param registry_credential: Credential information to connect to container registry. 
:type registry_credential: ~azure.servicefabric.models.RegistryCredential """ @@ -7225,22 +8864,27 @@ class CreateComposeDeploymentDescription(Model): 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, } - def __init__(self, *, deployment_name: str, compose_file_content: str, registry_credential=None, **kwargs) -> None: + def __init__( + self, + *, + deployment_name: str, + compose_file_content: str, + registry_credential: Optional["RegistryCredential"] = None, + **kwargs + ): super(CreateComposeDeploymentDescription, self).__init__(**kwargs) self.deployment_name = deployment_name self.compose_file_content = compose_file_content self.registry_credential = registry_credential -class CurrentUpgradeDomainProgressInfo(Model): +class CurrentUpgradeDomainProgressInfo(msrest.serialization.Model): """Information about the current in-progress upgrade domain. - :param domain_name: The name of the upgrade domain + :param domain_name: The name of the upgrade domain. :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their - statuses - :type node_upgrade_progress_list: - list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
+ :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -7248,41 +8892,51 @@ class CurrentUpgradeDomainProgressInfo(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + node_upgrade_progress_list: Optional[List["NodeUpgradeProgressInfo"]] = None, + **kwargs + ): super(CurrentUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = domain_name self.node_upgrade_progress_list = node_upgrade_progress_list -class DeactivationIntentDescription(Model): +class DeactivationIntentDescription(msrest.serialization.Model): """Describes the intent or reason for deactivating the node. - :param deactivation_intent: Describes the intent or reason for - deactivating the node. The possible values are following. Possible values - include: 'Pause', 'Restart', 'RemoveData' - :type deactivation_intent: str or - ~azure.servicefabric.models.DeactivationIntent + :param deactivation_intent: Describes the intent or reason for deactivating the node. The + possible values are following. Possible values include: "Pause", "Restart", "RemoveData". + :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent """ _attribute_map = { 'deactivation_intent': {'key': 'DeactivationIntent', 'type': 'str'}, } - def __init__(self, *, deactivation_intent=None, **kwargs) -> None: + def __init__( + self, + *, + deactivation_intent: Optional[Union[str, "DeactivationIntent"]] = None, + **kwargs + ): super(DeactivationIntentDescription, self).__init__(**kwargs) self.deactivation_intent = deactivation_intent -class ExecutionPolicy(Model): +class ExecutionPolicy(msrest.serialization.Model): """The execution policy of the service. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy + sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. Enumerates the execution policy types for services.Constant filled by + server. Possible values include: "Default", "RunToCompletion". + :type type: str or ~azure.servicefabric.models.ExecutionPolicyType """ _validation = { @@ -7297,9 +8951,12 @@ class ExecutionPolicy(Model): 'type': {'Default': 'DefaultExecutionPolicy', 'RunToCompletion': 'RunToCompletionExecutionPolicy'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ExecutionPolicy, self).__init__(**kwargs) - self.type = None + self.type = None # type: Optional[str] class DefaultExecutionPolicy(ExecutionPolicy): @@ -7307,8 +8964,9 @@ class DefaultExecutionPolicy(ExecutionPolicy): All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. Enumerates the execution policy types for services.Constant filled by + server. Possible values include: "Default", "RunToCompletion". + :type type: str or ~azure.servicefabric.models.ExecutionPolicyType """ _validation = { @@ -7319,76 +8977,87 @@ class DefaultExecutionPolicy(ExecutionPolicy): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(DefaultExecutionPolicy, self).__init__(**kwargs) - self.type = 'Default' + self.type = 'Default' # type: str class DeletePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that deletes a specified property if it - exists. 
- Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that deletes a specified property if it exists. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, } - def __init__(self, *, property_name: str, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + **kwargs + ): super(DeletePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'Delete' + self.kind = 'Delete' # type: str class DeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta nodes, containing health evaluations - for each unhealthy node that impacted current aggregated health state. - Can be returned during cluster upgrade when the aggregated health state of - the cluster is Warning or Error. - - All required parameters must be populated in order to send to Azure. 
- - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for delta nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. +Can be returned during cluster upgrade when the aggregated health state of the cluster is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param baseline_error_count: Number of nodes with aggregated heath state - Error in the health store at the beginning of the cluster upgrade. + :param baseline_error_count: Number of nodes with aggregated heath state Error in the health + store at the beginning of the cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of nodes in the health store at - the beginning of the cluster upgrade. + :param baseline_total_count: Total number of nodes in the health store at the beginning of the + cluster upgrade. :type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of - delta unhealthy nodes from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of delta unhealthy nodes + from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int :param total_count: Total number of nodes in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. - Includes all the unhealthy NodeHealthEvaluation that impacted the - aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. + Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -7396,9 +9065,9 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, 'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'}, @@ -7406,45 +9075,52 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, baseline_error_count: int=None, baseline_total_count: int=None, max_percent_delta_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + baseline_error_count: Optional[int] = None, + baseline_total_count: Optional[int] = None, + max_percent_delta_unhealthy_nodes: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(DeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'DeltaNodesCheck' # type: str self.baseline_error_count = baseline_error_count self.baseline_total_count = baseline_total_count self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'DeltaNodesCheck' class 
DeployedApplicationHealth(EntityHealth): - """Information about the health of an application deployed on a Service Fabric - node. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Information about the health of an application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. 
:type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the application deployed on the node whose health - information is described by this object. + :param name: Name of the application deployed on the node whose health information is described + by this object. :type name: str :param node_name: Name of the node where this application is deployed. :type node_name: str - :param deployed_service_package_health_states: Deployed service package - health states for the current deployed application as found in the health - store. + :param deployed_service_package_health_states: Deployed service package health states for the + current deployed application as found in the health store. :type deployed_service_package_health_states: list[~azure.servicefabric.models.DeployedServicePackageHealthState] """ @@ -7459,7 +9135,18 @@ class DeployedApplicationHealth(EntityHealth): 'deployed_service_package_health_states': {'key': 'DeployedServicePackageHealthStates', 'type': '[DeployedServicePackageHealthState]'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, node_name: str=None, deployed_service_package_health_states=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + name: Optional[str] = None, + node_name: Optional[str] = None, + deployed_service_package_health_states: Optional[List["DeployedServicePackageHealthState"]] = None, + **kwargs + ): super(DeployedApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name self.node_name 
= node_name @@ -7467,34 +9154,36 @@ def __init__(self, *, aggregated_health_state=None, health_events=None, unhealth class DeployedApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed application, containing - information about the data and the algorithm used by the health store to - evaluate health. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a deployed application, containing information about the data and the algorithm used by the health store to evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". 
+ :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Name of the node where the application is deployed to. :type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the deployed application. - The types of the unhealthy evaluations can be - DeployedServicePackagesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the deployed application. + The types of the unhealthy evaluations can be DeployedServicePackagesHealthEvaluation or + EventHealthEvaluation. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -7502,20 +9191,29 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, application_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + node_name: Optional[str] = None, + application_name: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(DeployedApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'DeployedApplication' # type: str self.node_name = node_name self.application_name = application_name self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'DeployedApplication' class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): @@ -7523,25 +9221,44 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -7559,17 +9276,16 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -7584,11 +9300,11 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -7602,8 +9318,28 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_instance_id: int, + node_name: str, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): 
super(DeployedApplicationHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'DeployedApplicationHealthReportExpired' # type: str self.application_instance_id = application_instance_id self.node_name = node_name self.source_id = source_id @@ -7614,24 +9350,18 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, a self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'DeployedApplicationHealthReportExpired' class DeployedApplicationHealthState(EntityHealthState): - """Represents the health state of a deployed application, which contains the - entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is - deployed. + """Represents the health state of a deployed application, which contains the entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is deployed. :type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. 
:type application_name: str """ @@ -7641,27 +9371,31 @@ class DeployedApplicationHealthState(EntityHealthState): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + application_name: Optional[str] = None, + **kwargs + ): super(DeployedApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.node_name = node_name self.application_name = application_name class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed application, which contains - the node where the application is deployed, the aggregated health state and - any deployed service packages that respect the chunk query description - filters. - - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + """Represents the health state chunk of a deployed application, which contains the node where the application is deployed, the aggregated health state and any deployed service packages that respect the chunk query description filters. + + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of node where the application is deployed. 
:type node_name: str - :param deployed_service_package_health_state_chunks: The list of deployed - service package health state chunks belonging to the deployed application - that respect the filters in the cluster health chunk query description. + :param deployed_service_package_health_state_chunks: The list of deployed service package + health state chunks belonging to the deployed application that respect the filters in the + cluster health chunk query description. :type deployed_service_package_health_state_chunks: ~azure.servicefabric.models.DeployedServicePackageHealthStateChunkList """ @@ -7672,93 +9406,87 @@ class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_service_package_health_state_chunks': {'key': 'DeployedServicePackageHealthStateChunks', 'type': 'DeployedServicePackageHealthStateChunkList'}, } - def __init__(self, *, health_state=None, node_name: str=None, deployed_service_package_health_state_chunks=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + deployed_service_package_health_state_chunks: Optional["DeployedServicePackageHealthStateChunkList"] = None, + **kwargs + ): super(DeployedApplicationHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.node_name = node_name self.deployed_service_package_health_state_chunks = deployed_service_package_health_state_chunks -class DeployedApplicationHealthStateChunkList(Model): - """The list of deployed application health state chunks that respect the input - filters in the chunk query. Returned by get cluster health state chunks - query. +class DeployedApplicationHealthStateChunkList(msrest.serialization.Model): + """The list of deployed application health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. 
- :param items: The list of deployed application health state chunks that - respect the input filters in the chunk query. - :type items: - list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] + :param items: The list of deployed application health state chunks that respect the input + filters in the chunk query. + :type items: list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedApplicationHealthStateChunk]'}, } - def __init__(self, *, items=None, **kwargs) -> None: + def __init__( + self, + *, + items: Optional[List["DeployedApplicationHealthStateChunk"]] = None, + **kwargs + ): super(DeployedApplicationHealthStateChunkList, self).__init__(**kwargs) self.items = items -class DeployedApplicationHealthStateFilter(Model): - """Defines matching criteria to determine whether a deployed application - should be included as a child of an application in the cluster health - chunk. - The deployed applications are only returned if the parent application - matches a filter specified in the cluster health chunk query description. - One filter can match zero, one or multiple deployed applications, depending - on its properties. - - :param node_name_filter: The name of the node where the application is - deployed in order to match the filter. - If specified, the filter is applied only to the application deployed on - the specified node. - If the application is not deployed on the node with the specified name, no - deployed application is returned in the cluster health chunk based on this - filter. - Otherwise, the deployed application is included in the cluster health - chunk if it respects the other filter properties. - If not specified, all deployed applications that match the parent filters - (if any) are taken into consideration and matched against the other filter - members, like health state filter. 
+class DeployedApplicationHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a deployed application should be included as a child of an application in the cluster health chunk. +The deployed applications are only returned if the parent application matches a filter specified in the cluster health chunk query description. +One filter can match zero, one or multiple deployed applications, depending on its properties. + + :param node_name_filter: The name of the node where the application is deployed in order to + match the filter. + If specified, the filter is applied only to the application deployed on the specified node. + If the application is not deployed on the node with the specified name, no deployed + application is returned in the cluster health chunk based on this filter. + Otherwise, the deployed application is included in the cluster health chunk if it respects the + other filter properties. + If not specified, all deployed applications that match the parent filters (if any) are taken + into consideration and matched against the other filter members, like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the - deployed applications. It allows selecting deployed applications if they - match the desired health states. - The possible values are integer value of one of the following health - states. Only deployed applications that match the filter are returned. All - deployed applications are used to evaluate the cluster aggregated health - state. - If not specified, default value is None, unless the node name is - specified. If the filter has default value and node name is specified, the - matching deployed application is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. 
- For example, if the provided value is 6, it matches deployed applications - with HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the deployed applications. It + allows selecting deployed applications if they match the desired health states. + The possible values are integer value of one of the following health states. Only deployed + applications that match the filter are returned. All deployed applications are used to evaluate + the cluster aggregated health state. + If not specified, default value is None, unless the node name is specified. If the filter has + default value and node name is specified, the matching deployed application is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed applications with HealthState + value of OK (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. 
+ * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param deployed_service_package_filters: Defines a list of filters that - specify which deployed service packages to be included in the returned - cluster health chunk as children of the parent deployed application. The - deployed service packages are returned only if the parent deployed + :param deployed_service_package_filters: Defines a list of filters that specify which deployed + service packages to be included in the returned cluster health chunk as children of the parent + deployed application. The deployed service packages are returned only if the parent deployed application matches a filter. - If the list is empty, no deployed service packages are returned. All the - deployed service packages are used to evaluate the parent deployed - application aggregated health state, regardless of the input filters. - The deployed application filter may specify multiple deployed service - package filters. - For example, it can specify a filter to return all deployed service - packages with health state Error and another filter to always include a - deployed service package on a node. + If the list is empty, no deployed service packages are returned. All the deployed service + packages are used to evaluate the parent deployed application aggregated health state, + regardless of the input filters. + The deployed application filter may specify multiple deployed service package filters. + For example, it can specify a filter to return all deployed service packages with health state + Error and another filter to always include a deployed service package on a node. 
:type deployed_service_package_filters: list[~azure.servicefabric.models.DeployedServicePackageHealthStateFilter] """ @@ -7769,47 +9497,49 @@ class DeployedApplicationHealthStateFilter(Model): 'deployed_service_package_filters': {'key': 'DeployedServicePackageFilters', 'type': '[DeployedServicePackageHealthStateFilter]'}, } - def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, deployed_service_package_filters=None, **kwargs) -> None: + def __init__( + self, + *, + node_name_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + deployed_service_package_filters: Optional[List["DeployedServicePackageHealthStateFilter"]] = None, + **kwargs + ): super(DeployedApplicationHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = node_name_filter self.health_state_filter = health_state_filter self.deployed_service_package_filters = deployed_service_package_filters -class DeployedApplicationInfo(Model): +class DeployedApplicationInfo(msrest.serialization.Model): """Information about application deployed on the node. - :param id: The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to - identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param id: The identity of the application. This is an encoded representation of the + application name. This is used in the REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
:type id: str - :param name: The name of the application, including the 'fabric:' URI - scheme. + :param name: The name of the application, including the 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. + :param type_name: The application type name as defined in the application manifest. :type type_name: str - :param status: The status of the application deployed on the node. - Following are the possible values. Possible values include: 'Invalid', - 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' + :param status: The status of the application deployed on the node. Following are the possible + values. Possible values include: "Invalid", "Downloading", "Activating", "Active", "Upgrading", + "Deactivating". :type status: str or ~azure.servicefabric.models.DeployedApplicationStatus - :param work_directory: The work directory of the application on the node. - The work directory can be used to store application data. + :param work_directory: The work directory of the application on the node. The work directory + can be used to store application data. :type work_directory: str - :param log_directory: The log directory of the application on the node. - The log directory can be used to store application logs. + :param log_directory: The log directory of the application on the node. The log directory can + be used to store application logs. :type log_directory: str - :param temp_directory: The temp directory of the application on the node. - The code packages belonging to the application are forked with this - directory set as their temporary directory. + :param temp_directory: The temp directory of the application on the node. The code packages + belonging to the application are forked with this directory set as their temporary directory. 
:type temp_directory: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -7824,7 +9554,19 @@ class DeployedApplicationInfo(Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__(self, *, id: str=None, name: str=None, type_name: str=None, status=None, work_directory: str=None, log_directory: str=None, temp_directory: str=None, health_state=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + type_name: Optional[str] = None, + status: Optional[Union[str, "DeployedApplicationStatus"]] = None, + work_directory: Optional[str] = None, + log_directory: Optional[str] = None, + temp_directory: Optional[str] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + **kwargs + ): super(DeployedApplicationInfo, self).__init__(**kwargs) self.id = id self.name = name @@ -7841,25 +9583,44 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -7877,17 +9638,16 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -7902,11 +9662,11 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -7920,8 +9680,28 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + application_instance_id: int, + node_name: str, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): 
super(DeployedApplicationNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'DeployedApplicationNewHealthReport' # type: str self.application_instance_id = application_instance_id self.node_name = node_name self.source_id = source_id @@ -7932,41 +9712,41 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, a self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'DeployedApplicationNewHealthReport' class DeployedApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed applications, containing health - evaluations for each unhealthy deployed application that impacted current - aggregated health state. - Can be returned when evaluating application health and the aggregated - health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for deployed applications, containing health evaluations for each unhealthy deployed application that impacted current aggregated health state. +Can be returned when evaluating application health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_deployed_applications: Maximum allowed - percentage of unhealthy deployed applications from the - ApplicationHealthPolicy. + :param max_percent_unhealthy_deployed_applications: Maximum allowed percentage of unhealthy + deployed applications from the ApplicationHealthPolicy. :type max_percent_unhealthy_deployed_applications: int - :param total_count: Total number of deployed applications of the - application in the health store. + :param total_count: Total number of deployed applications of the application in the health + store. 
:type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - DeployedApplicationHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy DeployedApplicationHealthEvaluation that impacted the + aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -7974,64 +9754,66 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_deployed_applications': {'key': 'MaxPercentUnhealthyDeployedApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_deployed_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + max_percent_unhealthy_deployed_applications: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(DeployedApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'DeployedApplications' # type: str 
self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'DeployedApplications' -class DeployedCodePackageInfo(Model): +class DeployedCodePackageInfo(msrest.serialization.Model): """Information about code package deployed on a Service Fabric node. :param name: The name of the code package. :type name: str - :param version: The version of the code package specified in service - manifest. + :param version: The version of the code package specified in service manifest. :type version: str - :param service_manifest_name: The name of service manifest that specified - this code package. + :param service_manifest_name: The name of service manifest that specified this code package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_type: Specifies the type of host for main entry point of a - code package as specified in service manifest. Possible values include: - 'Invalid', 'ExeHost', 'ContainerHost' + :param host_type: Specifies the type of host for main entry point of a code package as + specified in service manifest. Possible values include: "Invalid", "ExeHost", "ContainerHost". 
:type host_type: str or ~azure.servicefabric.models.HostType - :param host_isolation_mode: Specifies the isolation mode of main entry - point of a code package when it's host type is ContainerHost. This is - specified as part of container host policies in application manifest while - importing service manifest. Possible values include: 'None', 'Process', - 'HyperV' - :type host_isolation_mode: str or - ~azure.servicefabric.models.HostIsolationMode - :param status: Specifies the status of a deployed application or service - package on a Service Fabric node. Possible values include: 'Invalid', - 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', - 'RanToCompletion', 'Failed' + :param host_isolation_mode: Specifies the isolation mode of main entry point of a code package + when it's host type is ContainerHost. This is specified as part of container host policies in + application manifest while importing service manifest. Possible values include: "None", + "Process", "HyperV". + :type host_isolation_mode: str or ~azure.servicefabric.models.HostIsolationMode + :param status: Specifies the status of a deployed application or service package on a Service + Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", + "Upgrading", "Deactivating", "RanToCompletion", "Failed". :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param run_frequency_interval: The interval at which code package is run. - This is used for periodic code package. + :param run_frequency_interval: The interval at which code package is run. This is used for + periodic code package. :type run_frequency_interval: str - :param setup_entry_point: Information about setup or main entry point of a - code package deployed on a Service Fabric node. + :param setup_entry_point: Information about setup or main entry point of a code package + deployed on a Service Fabric node. 
:type setup_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint - :param main_entry_point: Information about setup or main entry point of a - code package deployed on a Service Fabric node. + :param main_entry_point: Information about setup or main entry point of a code package deployed + on a Service Fabric node. :type main_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint """ @@ -8048,7 +9830,21 @@ class DeployedCodePackageInfo(Model): 'main_entry_point': {'key': 'MainEntryPoint', 'type': 'CodePackageEntryPoint'}, } - def __init__(self, *, name: str=None, version: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, host_type=None, host_isolation_mode=None, status=None, run_frequency_interval: str=None, setup_entry_point=None, main_entry_point=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + version: Optional[str] = None, + service_manifest_name: Optional[str] = None, + service_package_activation_id: Optional[str] = None, + host_type: Optional[Union[str, "HostType"]] = None, + host_isolation_mode: Optional[Union[str, "HostIsolationMode"]] = None, + status: Optional[Union[str, "DeploymentStatus"]] = None, + run_frequency_interval: Optional[str] = None, + setup_entry_point: Optional["CodePackageEntryPoint"] = None, + main_entry_point: Optional["CodePackageEntryPoint"] = None, + **kwargs + ): super(DeployedCodePackageInfo, self).__init__(**kwargs) self.name = name self.version = version @@ -8063,28 +9859,24 @@ def __init__(self, *, name: str=None, version: str=None, service_manifest_name: class DeployedServicePackageHealth(EntityHealth): - """Information about the health of a service package for a specific - application deployed on a Service Fabric node. - - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Information about the health of a service package for a specific application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. 
:type application_name: str :param service_manifest_name: Name of the service manifest. :type service_manifest_name: str @@ -8102,7 +9894,18 @@ class DeployedServicePackageHealth(EntityHealth): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, application_name: str=None, service_manifest_name: str=None, node_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + application_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + node_name: Optional[str] = None, + **kwargs + ): super(DeployedServicePackageHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.application_name = application_name self.service_manifest_name = service_manifest_name @@ -8110,36 +9913,36 @@ def __init__(self, *, aggregated_health_state=None, health_events=None, unhealth class DeployedServicePackageHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed service package, containing - information about the data and the algorithm used by health store to - evaluate health. The evaluation is returned only when the aggregated health - state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a deployed service package, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: The name of a Service Fabric node. 
:type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: The name of the service manifest. :type service_manifest_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state. The type of the unhealthy evaluations - can be EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state. The type of the unhealthy evaluations can be EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8147,22 +9950,32 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + node_name: Optional[str] = None, + application_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + unhealthy_evaluations: 
Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(DeployedServicePackageHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'DeployedServicePackage' # type: str self.node_name = node_name self.application_name = application_name self.service_manifest_name = service_manifest_name self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'DeployedServicePackage' class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): @@ -8170,33 +9983,50 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", 
"DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_manifest: Required. Service manifest name. :type service_manifest: str - :param service_package_instance_id: Required. Id of Service package - instance. + :param service_package_instance_id: Required. Id of Service package instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package - activation. + :param service_package_activation_id: Required. Id of Service package activation. :type service_package_activation_id: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str @@ -8212,17 +10042,16 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -8239,11 +10068,11 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest': {'key': 'ServiceManifest', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -8259,8 +10088,30 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_manifest: str, service_package_instance_id: int, service_package_activation_id: str, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + service_manifest: str, + service_package_instance_id: int, + service_package_activation_id: str, + node_name: str, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, 
+ source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(DeployedServicePackageHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'DeployedServicePackageHealthReportExpired' # type: str self.service_manifest = service_manifest self.service_package_instance_id = service_package_instance_id self.service_package_activation_id = service_package_activation_id @@ -8273,33 +10124,25 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, s self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'DeployedServicePackageHealthReportExpired' class DeployedServicePackageHealthState(EntityHealthState): - """Represents the health state of a deployed service package, containing the - entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is - deployed. + """Represents the health state of a deployed service package, containing the entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is deployed. :type node_name: str - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_manifest_name: Name of the manifest describing the service - package. + :param service_manifest_name: Name of the manifest describing the service package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. 
:type service_package_activation_id: str """ @@ -8312,7 +10155,16 @@ class DeployedServicePackageHealthState(EntityHealthState): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + application_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + service_package_activation_id: Optional[str] = None, + **kwargs + ): super(DeployedServicePackageHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.node_name = node_name self.application_name = application_name @@ -8321,21 +10173,18 @@ def __init__(self, *, aggregated_health_state=None, node_name: str=None, applica class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed service package, which - contains the service manifest name and the service package aggregated - health state. + """Represents the health state chunk of a deployed service package, which contains the service manifest name and the service package aggregated health state. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param service_manifest_name: The name of the service manifest. 
:type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -8346,88 +10195,85 @@ class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, *, health_state=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + service_manifest_name: Optional[str] = None, + service_package_activation_id: Optional[str] = None, + **kwargs + ): super(DeployedServicePackageHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.service_manifest_name = service_manifest_name self.service_package_activation_id = service_package_activation_id -class DeployedServicePackageHealthStateChunkList(Model): - """The list of deployed service package health state chunks that respect the - input filters in the chunk query. Returned by get cluster health state - chunks query. +class DeployedServicePackageHealthStateChunkList(msrest.serialization.Model): + """The list of deployed service package health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. 
- :param items: The list of deployed service package health state chunks - that respect the input filters in the chunk query. - :type items: - list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] + :param items: The list of deployed service package health state chunks that respect the input + filters in the chunk query. + :type items: list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedServicePackageHealthStateChunk]'}, } - def __init__(self, *, items=None, **kwargs) -> None: + def __init__( + self, + *, + items: Optional[List["DeployedServicePackageHealthStateChunk"]] = None, + **kwargs + ): super(DeployedServicePackageHealthStateChunkList, self).__init__(**kwargs) self.items = items -class DeployedServicePackageHealthStateFilter(Model): - """Defines matching criteria to determine whether a deployed service package - should be included as a child of a deployed application in the cluster - health chunk. - The deployed service packages are only returned if the parent entities - match a filter specified in the cluster health chunk query description. The - parent deployed application and its parent application must be included in - the cluster health chunk. - One filter can match zero, one or multiple deployed service packages, - depending on its properties. - - :param service_manifest_name_filter: The name of the service manifest - which identifies the deployed service packages that matches the filter. - If specified, the filter is applied only to the specified deployed service - packages, if any. - If no deployed service packages with specified manifest name exist, - nothing is returned in the cluster health chunk based on this filter. - If any deployed service package exists, they are included in the cluster - health chunk if it respects the other filter properties. 
- If not specified, all deployed service packages that match the parent - filters (if any) are taken into consideration and matched against the - other filter members, like health state filter. +class DeployedServicePackageHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a deployed service package should be included as a child of a deployed application in the cluster health chunk. +The deployed service packages are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent deployed application and its parent application must be included in the cluster health chunk. +One filter can match zero, one or multiple deployed service packages, depending on its properties. + + :param service_manifest_name_filter: The name of the service manifest which identifies the + deployed service packages that matches the filter. + If specified, the filter is applied only to the specified deployed service packages, if any. + If no deployed service packages with specified manifest name exist, nothing is returned in the + cluster health chunk based on this filter. + If any deployed service package exists, they are included in the cluster health chunk if it + respects the other filter properties. + If not specified, all deployed service packages that match the parent filters (if any) are + taken into consideration and matched against the other filter members, like health state + filter. :type service_manifest_name_filter: str - :param service_package_activation_id_filter: The activation ID of a - deployed service package that matches the filter. - If not specified, the filter applies to all deployed service packages that - match the other parameters. - If specified, the filter matches only the deployed service package with - the specified activation ID. + :param service_package_activation_id_filter: The activation ID of a deployed service package + that matches the filter. 
+ If not specified, the filter applies to all deployed service packages that match the other + parameters. + If specified, the filter matches only the deployed service package with the specified + activation ID. :type service_package_activation_id_filter: str - :param health_state_filter: The filter for the health state of the - deployed service packages. It allows selecting deployed service packages - if they match the desired health states. - The possible values are integer value of one of the following health - states. Only deployed service packages that match the filter are returned. - All deployed service packages are used to evaluate the parent deployed - application aggregated health state. - If not specified, default value is None, unless the deployed service - package ID is specified. If the filter has default value and deployed - service package ID is specified, the matching deployed service package is - returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed service - packages with HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the deployed service packages. + It allows selecting deployed service packages if they match the desired health states. 
+ The possible values are integer value of one of the following health states. Only deployed + service packages that match the filter are returned. All deployed service packages are used to + evaluate the parent deployed application aggregated health state. + If not specified, default value is None, unless the deployed service package ID is specified. + If the filter has default value and deployed service package ID is specified, the matching + deployed service package is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed service packages with HealthState + value of OK (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. 
:type health_state_filter: int """ @@ -8437,32 +10283,35 @@ class DeployedServicePackageHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, *, service_manifest_name_filter: str=None, service_package_activation_id_filter: str=None, health_state_filter: int=0, **kwargs) -> None: + def __init__( + self, + *, + service_manifest_name_filter: Optional[str] = None, + service_package_activation_id_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + **kwargs + ): super(DeployedServicePackageHealthStateFilter, self).__init__(**kwargs) self.service_manifest_name_filter = service_manifest_name_filter self.service_package_activation_id_filter = service_package_activation_id_filter self.health_state_filter = health_state_filter -class DeployedServicePackageInfo(Model): +class DeployedServicePackageInfo(msrest.serialization.Model): """Information about service package deployed on a Service Fabric node. - :param name: The name of the service package as specified in the service - manifest. + :param name: The name of the service package as specified in the service manifest. :type name: str - :param version: The version of the service package specified in service - manifest. + :param version: The version of the service package specified in service manifest. :type version: str - :param status: Specifies the status of a deployed application or service - package on a Service Fabric node. Possible values include: 'Invalid', - 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', - 'RanToCompletion', 'Failed' + :param status: Specifies the status of a deployed application or service package on a Service + Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", + "Upgrading", "Deactivating", "RanToCompletion", "Failed". 
:type status: str or ~azure.servicefabric.models.DeploymentStatus - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -8474,7 +10323,15 @@ class DeployedServicePackageInfo(Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, *, name: str=None, version: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + version: Optional[str] = None, + status: Optional[Union[str, "DeploymentStatus"]] = None, + service_package_activation_id: Optional[str] = None, + **kwargs + ): super(DeployedServicePackageInfo, self).__init__(**kwargs) self.name = name self.version = version @@ -8487,33 +10344,50 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_id: Required. The identity of the application. This is - an encoded representation of the application name. This is used in the - REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the application name is "fabric:/myapp/app1", - the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" - in previous versions. + :param application_id: Required. The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to identify the + application resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the application name is "fabric:/myapp/app1", + the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. :type application_id: str :param service_manifest_name: Required. Service manifest name. :type service_manifest_name: str - :param service_package_instance_id: Required. Id of Service package - instance. + :param service_package_instance_id: Required. Id of Service package instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package - activation. + :param service_package_activation_id: Required. Id of Service package activation. :type service_package_activation_id: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str @@ -8529,17 +10403,16 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest_name': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -8556,11 +10429,11 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -8576,8 +10449,30 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_manifest_name: str, service_package_instance_id: int, service_package_activation_id: str, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, 
source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + application_id: str, + service_manifest_name: str, + service_package_instance_id: int, + service_package_activation_id: str, + node_name: str, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(DeployedServicePackageNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.kind = 'DeployedServicePackageNewHealthReport' # type: str self.service_manifest_name = service_manifest_name self.service_package_instance_id = service_package_instance_id self.service_package_activation_id = service_package_activation_id @@ -8590,38 +10485,37 @@ def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, s self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'DeployedServicePackageNewHealthReport' class DeployedServicePackagesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed service packages, containing - health evaluations for each unhealthy deployed service package that - impacted current aggregated health state. Can be returned when evaluating - deployed application health and the aggregated health state is either Error - or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for deployed service packages, containing health evaluations for each unhealthy deployed service package that impacted current aggregated health state. Can be returned when evaluating deployed application health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. 
- :type kind: str - :param total_count: Total number of deployed service packages of the - deployed application in the health store. + :param total_count: Total number of deployed service packages of the deployed application in + the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - DeployedServicePackageHealthEvaluation that impacted the aggregated - health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy DeployedServicePackageHealthEvaluation that impacted the + aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8629,52 +10523,56 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(DeployedServicePackagesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'DeployedServicePackages' # type: str self.total_count = 
total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'DeployedServicePackages' -class DeployedServiceReplicaDetailInfo(Model): +class DeployedServiceReplicaDetailInfo(msrest.serialization.Model): """Information about a Service Fabric service replica deployed on a node. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeployedStatefulServiceReplicaDetailInfo, - DeployedStatelessServiceInstanceDetailInfo + sub-classes are: DeployedStatefulServiceReplicaDetailInfo, DeployedStatelessServiceInstanceDetailInfo. All required parameters must be populated in order to send to Azure. - :param service_name: Full hierarchical name of the service in URI format - starting with `fabric:`. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: Full hierarchical name of the service in URI format starting with + ``fabric:``. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle - operation on a stateful service replica or stateless service instance. 
- Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', - 'Abort' - :type current_service_operation: str or - ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the - current service operation in UTC format. - :type current_service_operation_start_time_utc: datetime + :param current_service_operation: Specifies the current active life-cycle operation on a + stateful service replica or stateless service instance. Possible values include: "Unknown", + "None", "Open", "ChangeRole", "Close", "Abort". + :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the current service + operation in UTC format. + :type current_service_operation_start_time_utc: ~datetime.datetime :param reported_load: List of load reported by replica. - :type reported_load: - list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str + :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] """ _validation = { @@ -8682,75 +10580,77 @@ class DeployedServiceReplicaDetailInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaDetailInfo', 'Stateless': 'DeployedStatelessServiceInstanceDetailInfo'} } - def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + current_service_operation: Optional[Union[str, "ServiceOperationName"]] = None, + current_service_operation_start_time_utc: Optional[datetime.datetime] = None, + reported_load: Optional[List["LoadMetricReportInfo"]] = None, + **kwargs + ): super(DeployedServiceReplicaDetailInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.service_name = service_name self.partition_id = partition_id self.current_service_operation = current_service_operation self.current_service_operation_start_time_utc = current_service_operation_start_time_utc self.reported_load = reported_load - self.service_kind = None -class DeployedServiceReplicaInfo(Model): +class DeployedServiceReplicaInfo(msrest.serialization.Model): """Information about a Service Fabric service replica deployed on a node. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeployedStatefulServiceReplicaInfo, - DeployedStatelessServiceInstanceInfo + sub-classes are: DeployedStatefulServiceReplicaInfo, DeployedStatelessServiceInstanceInfo. All required parameters must be populated in order to send to Azure. - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this - replica. + :param code_package_name: The name of the code package that hosts this replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. 
If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or - ChangeRole. + :param address: The last address returned by the replica in Open or ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the - replica. This will be zero if the replica is down. In hyper-v containers - this host process ID will be from different kernel. + :param host_process_id: Host process ID of the process that is hosting the replica. This will + be zero if the replica is down. In hyper-v containers this host process ID will be from + different kernel. :type host_process_id: str - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str """ _validation = { @@ -8758,6 +10658,7 @@ class DeployedServiceReplicaInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -8767,15 +10668,28 @@ class DeployedServiceReplicaInfo(Model): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'} } - def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + service_type_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + code_package_name: Optional[str] = None, + partition_id: Optional[str] = None, + replica_status: Optional[Union[str, "ReplicaStatus"]] = None, + address: Optional[str] = None, + service_package_activation_id: Optional[str] = None, + host_process_id: Optional[str] = None, + **kwargs + ): super(DeployedServiceReplicaInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.service_name = service_name self.service_type_name = service_type_name self.service_manifest_name = service_manifest_name @@ -8785,31 +10699,25 @@ def __init__(self, *, service_name: str=None, service_type_name: str=None, servi self.address = address self.service_package_activation_id = 
service_package_activation_id self.host_process_id = host_process_id - self.service_kind = None -class DeployedServiceTypeInfo(Model): - """Information about service type deployed on a node, information such as the - status of the service type registration on a node. +class DeployedServiceTypeInfo(msrest.serialization.Model): + """Information about service type deployed on a node, information such as the status of the service type registration on a node. - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that registered the - service type. + :param code_package_name: The name of the code package that registered the service type. :type code_package_name: str - :param status: The status of the service type registration on the node. - Possible values include: 'Invalid', 'Disabled', 'Enabled', 'Registered' - :type status: str or - ~azure.servicefabric.models.ServiceTypeRegistrationStatus - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param status: The status of the service type registration on the node. Possible values + include: "Invalid", "Disabled", "Enabled", "Registered". + :type status: str or ~azure.servicefabric.models.ServiceTypeRegistrationStatus + :param service_package_activation_id: The ActivationId of a deployed service package. 
If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -8822,7 +10730,16 @@ class DeployedServiceTypeInfo(Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, *, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_type_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + code_package_name: Optional[str] = None, + status: Optional[Union[str, "ServiceTypeRegistrationStatus"]] = None, + service_package_activation_id: Optional[str] = None, + **kwargs + ): super(DeployedServiceTypeInfo, self).__init__(**kwargs) self.service_type_name = service_type_name self.service_manifest_name = service_manifest_name @@ -8832,71 +10749,54 @@ def __init__(self, *, service_type_name: str=None, service_manifest_name: str=No class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateful replica running in a code package. Note - DeployedServiceReplicaQueryResult will contain duplicate data like - ServiceKind, ServiceName, PartitionId and replicaId. + """Information about a stateful replica running in a code package. Note DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and replicaId. All required parameters must be populated in order to send to Azure. - :param service_name: Full hierarchical name of the service in URI format - starting with `fabric:`. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. 
Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: Full hierarchical name of the service in URI format starting with + ``fabric:``. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle - operation on a stateful service replica or stateless service instance. - Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', - 'Abort' - :type current_service_operation: str or - ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the - current service operation in UTC format. - :type current_service_operation_start_time_utc: datetime + :param current_service_operation: Specifies the current active life-cycle operation on a + stateful service replica or stateless service instance. Possible values include: "Unknown", + "None", "Open", "ChangeRole", "Close", "Abort". + :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the current service + operation in UTC format. 
+ :type current_service_operation_start_time_utc: ~datetime.datetime :param reported_load: List of load reported by replica. - :type reported_load: - list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str - :param current_replicator_operation: Specifies the operation currently - being executed by the Replicator. Possible values include: 'Invalid', - 'None', 'Open', 'ChangeRole', 'UpdateEpoch', 'Close', 'Abort', - 'OnDataLoss', 'WaitForCatchup', 'Build' - :type current_replicator_operation: str or - ~azure.servicefabric.models.ReplicatorOperationName - :param read_status: Specifies the access status of the partition. Possible - values include: 'Invalid', 'Granted', 'ReconfigurationPending', - 'NotPrimary', 'NoWriteQuorum' - :type read_status: str or - ~azure.servicefabric.models.PartitionAccessStatus - :param write_status: Specifies the access status of the partition. 
- Possible values include: 'Invalid', 'Granted', 'ReconfigurationPending', - 'NotPrimary', 'NoWriteQuorum' - :type write_status: str or - ~azure.servicefabric.models.PartitionAccessStatus - :param replicator_status: Represents a base class for primary or secondary - replicator status. - Contains information about the service fabric replicator like the - replication/copy queue utilization, last acknowledgement received - timestamp, etc. + :param current_replicator_operation: Specifies the operation currently being executed by the + Replicator. Possible values include: "Invalid", "None", "Open", "ChangeRole", "UpdateEpoch", + "Close", "Abort", "OnDataLoss", "WaitForCatchup", "Build". + :type current_replicator_operation: str or ~azure.servicefabric.models.ReplicatorOperationName + :param read_status: Specifies the access status of the partition. Possible values include: + "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". + :type read_status: str or ~azure.servicefabric.models.PartitionAccessStatus + :param write_status: Specifies the access status of the partition. Possible values include: + "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". + :type write_status: str or ~azure.servicefabric.models.PartitionAccessStatus + :param replicator_status: Represents a base class for primary or secondary replicator status. + Contains information about the service fabric replicator like the replication/copy queue + utilization, last acknowledgement received timestamp, etc. :type replicator_status: ~azure.servicefabric.models.ReplicatorStatus - :param replica_status: Key value store related information for the - replica. - :type replica_status: - ~azure.servicefabric.models.KeyValueStoreReplicaStatus - :param deployed_service_replica_query_result: Information about a stateful - service replica deployed on a node. + :param replica_status: Key value store related information for the replica. 
+ :type replica_status: ~azure.servicefabric.models.KeyValueStoreReplicaStatus + :param deployed_service_replica_query_result: Information about a stateful service replica + deployed on a node. :type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatefulServiceReplicaInfo """ @@ -8906,12 +10806,12 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'current_replicator_operation': {'key': 'CurrentReplicatorOperation', 'type': 'str'}, 'read_status': {'key': 'ReadStatus', 'type': 'str'}, @@ -8921,8 +10821,25 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatefulServiceReplicaInfo'}, } - def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, replica_id: str=None, current_replicator_operation=None, read_status=None, write_status=None, replicator_status=None, replica_status=None, deployed_service_replica_query_result=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + current_service_operation: Optional[Union[str, "ServiceOperationName"]] = None, + current_service_operation_start_time_utc: Optional[datetime.datetime] = None, + 
reported_load: Optional[List["LoadMetricReportInfo"]] = None, + replica_id: Optional[str] = None, + current_replicator_operation: Optional[Union[str, "ReplicatorOperationName"]] = None, + read_status: Optional[Union[str, "PartitionAccessStatus"]] = None, + write_status: Optional[Union[str, "PartitionAccessStatus"]] = None, + replicator_status: Optional["ReplicatorStatus"] = None, + replica_status: Optional["KeyValueStoreReplicaStatus"] = None, + deployed_service_replica_query_result: Optional["DeployedStatefulServiceReplicaInfo"] = None, + **kwargs + ): super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load, **kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = replica_id self.current_replicator_operation = current_replicator_operation self.read_status = read_status @@ -8930,7 +10847,6 @@ def __init__(self, *, service_name: str=None, partition_id: str=None, current_se self.replicator_status = replicator_status self.replica_status = replica_status self.deployed_service_replica_query_result = deployed_service_replica_query_result - self.service_kind = 'Stateful' class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): @@ -8938,61 +10854,50 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: The full name of the service with 'fabric:' URI scheme. 
:type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this - replica. + :param code_package_name: The name of the code package that hosts this replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or - ChangeRole. + :param address: The last address returned by the replica in Open or ChangeRole. 
:type address: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the - replica. This will be zero if the replica is down. In hyper-v containers - this host process ID will be from different kernel. + :param host_process_id: Host process ID of the process that is hosting the replica. This will + be zero if the replica is down. In hyper-v containers this host process ID will be from + different kernel. :type host_process_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str - :param replica_role: The role of a replica of a stateful service. Possible - values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', - 'ActiveSecondary' + :param replica_role: The role of a replica of a stateful service. Possible values include: + "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_information: Information about current - reconfiguration like phase, type, previous configuration role of replica - and reconfiguration start date time. - :type reconfiguration_information: - ~azure.servicefabric.models.ReconfigurationInformation + :param reconfiguration_information: Information about current reconfiguration like phase, type, + previous configuration role of replica and reconfiguration start date time. 
+ :type reconfiguration_information: ~azure.servicefabric.models.ReconfigurationInformation """ _validation = { @@ -9000,6 +10905,7 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9009,58 +10915,67 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'reconfiguration_information': {'key': 'ReconfigurationInformation', 'type': 'ReconfigurationInformation'}, } - def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, replica_id: str=None, replica_role=None, reconfiguration_information=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + service_type_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + code_package_name: Optional[str] = None, + partition_id: Optional[str] = None, + replica_status: Optional[Union[str, "ReplicaStatus"]] = None, + address: Optional[str] = None, + service_package_activation_id: Optional[str] = None, + host_process_id: Optional[str] = None, + replica_id: Optional[str] = None, + replica_role: Optional[Union[str, "ReplicaRole"]] = None, + reconfiguration_information: Optional["ReconfigurationInformation"] = 
None, + **kwargs + ): super(DeployedStatefulServiceReplicaInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id, **kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = replica_id self.replica_role = replica_role self.reconfiguration_information = reconfiguration_information - self.service_kind = 'Stateful' class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateless instance running in a code package. Note that - DeployedServiceReplicaQueryResult will contain duplicate data like - ServiceKind, ServiceName, PartitionId and InstanceId. + """Information about a stateless instance running in a code package. Note that DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and InstanceId. All required parameters must be populated in order to send to Azure. - :param service_name: Full hierarchical name of the service in URI format - starting with `fabric:`. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: Full hierarchical name of the service in URI format starting with + ``fabric:``. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. 
+ :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle - operation on a stateful service replica or stateless service instance. - Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', - 'Abort' - :type current_service_operation: str or - ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the - current service operation in UTC format. - :type current_service_operation_start_time_utc: datetime + :param current_service_operation: Specifies the current active life-cycle operation on a + stateful service replica or stateless service instance. Possible values include: "Unknown", + "None", "Open", "ChangeRole", "Close", "Abort". + :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the current service + operation in UTC format. + :type current_service_operation_start_time_utc: ~datetime.datetime :param reported_load: List of load reported by replica. - :type reported_load: - list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. 
+ :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. :type instance_id: str - :param deployed_service_replica_query_result: Information about a - stateless service instance deployed on a node. + :param deployed_service_replica_query_result: Information about a stateless service instance + deployed on a node. :type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatelessServiceInstanceInfo """ @@ -9070,21 +10985,32 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatelessServiceInstanceInfo'}, } - def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, instance_id: str=None, deployed_service_replica_query_result=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + 
current_service_operation: Optional[Union[str, "ServiceOperationName"]] = None, + current_service_operation_start_time_utc: Optional[datetime.datetime] = None, + reported_load: Optional[List["LoadMetricReportInfo"]] = None, + instance_id: Optional[str] = None, + deployed_service_replica_query_result: Optional["DeployedStatelessServiceInstanceInfo"] = None, + **kwargs + ): super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load, **kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = instance_id self.deployed_service_replica_query_result = deployed_service_replica_query_result - self.service_kind = 'Stateless' class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): @@ -9092,49 +11018,42 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_manifest_name: The name of the service manifest in which this service type is + defined. 
:type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this - replica. + :param code_package_name: The name of the code package that hosts this replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or - ChangeRole. + :param address: The last address returned by the replica in Open or ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. 
If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the - replica. This will be zero if the replica is down. In hyper-v containers - this host process ID will be from different kernel. + :param host_process_id: Host process ID of the process that is hosting the replica. This will + be zero if the replica is down. In hyper-v containers this host process ID will be from + different kernel. :type host_process_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. 
:type instance_id: str """ @@ -9143,6 +11062,7 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9152,36 +11072,47 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, instance_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + service_type_name: Optional[str] = None, + service_manifest_name: Optional[str] = None, + code_package_name: Optional[str] = None, + partition_id: Optional[str] = None, + replica_status: Optional[Union[str, "ReplicaStatus"]] = None, + address: Optional[str] = None, + service_package_activation_id: Optional[str] = None, + host_process_id: Optional[str] = None, + instance_id: Optional[str] = None, + **kwargs + ): super(DeployedStatelessServiceInstanceInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id, **kwargs) + self.service_kind = 
'Stateless' # type: str self.instance_id = instance_id - self.service_kind = 'Stateless' -class DeployServicePackageToNodeDescription(Model): - """Defines description for downloading packages associated with a service - manifest to image cache on a Service Fabric node. +class DeployServicePackageToNodeDescription(msrest.serialization.Model): + """Defines description for downloading packages associated with a service manifest to image cache on a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest whose - packages need to be downloaded. + :param service_manifest_name: Required. The name of service manifest whose packages need to be + downloaded. :type service_manifest_name: str - :param application_type_name: Required. The application type name as - defined in the application manifest. + :param application_type_name: Required. The application type name as defined in the application + manifest. :type application_type_name: str - :param application_type_version: Required. The version of the application - type as defined in the application manifest. + :param application_type_version: Required. The version of the application type as defined in + the application manifest. :type application_type_version: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param package_sharing_policy: List of package sharing policy information. 
- :type package_sharing_policy: - list[~azure.servicefabric.models.PackageSharingPolicyInfo] + :type package_sharing_policy: list[~azure.servicefabric.models.PackageSharingPolicyInfo] """ _validation = { @@ -9199,7 +11130,16 @@ class DeployServicePackageToNodeDescription(Model): 'package_sharing_policy': {'key': 'PackageSharingPolicy', 'type': '[PackageSharingPolicyInfo]'}, } - def __init__(self, *, service_manifest_name: str, application_type_name: str, application_type_version: str, node_name: str, package_sharing_policy=None, **kwargs) -> None: + def __init__( + self, + *, + service_manifest_name: str, + application_type_name: str, + application_type_version: str, + node_name: str, + package_sharing_policy: Optional[List["PackageSharingPolicyInfo"]] = None, + **kwargs + ): super(DeployServicePackageToNodeDescription, self).__init__(**kwargs) self.service_manifest_name = service_manifest_name self.application_type_name = application_type_name @@ -9208,15 +11148,15 @@ def __init__(self, *, service_manifest_name: str, application_type_name: str, ap self.package_sharing_policy = package_sharing_policy -class DiagnosticsDescription(Model): +class DiagnosticsDescription(msrest.serialization.Model): """Describes the diagnostics options available. :param sinks: List of supported sinks that can be referenced. :type sinks: list[~azure.servicefabric.models.DiagnosticsSinkProperties] :param enabled: Status of whether or not sinks are enabled. :type enabled: bool - :param default_sink_refs: The sinks to be used if diagnostics is enabled. - Sink choices can be overridden at the service and code package level. + :param default_sink_refs: The sinks to be used if diagnostics is enabled. Sink choices can be + overridden at the service and code package level. 
:type default_sink_refs: list[str] """ @@ -9226,20 +11166,27 @@ class DiagnosticsDescription(Model): 'default_sink_refs': {'key': 'defaultSinkRefs', 'type': '[str]'}, } - def __init__(self, *, sinks=None, enabled: bool=None, default_sink_refs=None, **kwargs) -> None: + def __init__( + self, + *, + sinks: Optional[List["DiagnosticsSinkProperties"]] = None, + enabled: Optional[bool] = None, + default_sink_refs: Optional[List[str]] = None, + **kwargs + ): super(DiagnosticsDescription, self).__init__(**kwargs) self.sinks = sinks self.enabled = enabled self.default_sink_refs = default_sink_refs -class DiagnosticsRef(Model): +class DiagnosticsRef(msrest.serialization.Model): """Reference to sinks in DiagnosticsDescription. :param enabled: Status of whether or not sinks are enabled. :type enabled: bool - :param sink_refs: List of sinks to be used if enabled. References the list - of sinks in DiagnosticsDescription. + :param sink_refs: List of sinks to be used if enabled. References the list of sinks in + DiagnosticsDescription. :type sink_refs: list[str] """ @@ -9248,21 +11195,26 @@ class DiagnosticsRef(Model): 'sink_refs': {'key': 'sinkRefs', 'type': '[str]'}, } - def __init__(self, *, enabled: bool=None, sink_refs=None, **kwargs) -> None: + def __init__( + self, + *, + enabled: Optional[bool] = None, + sink_refs: Optional[List[str]] = None, + **kwargs + ): super(DiagnosticsRef, self).__init__(**kwargs) self.enabled = enabled self.sink_refs = sink_refs -class DisableBackupDescription(Model): - """It describes the body parameters while disabling backup of a backup - entity(Application/Service/Partition). +class DisableBackupDescription(msrest.serialization.Model): + """It describes the body parameters while disabling backup of a backup entity(Application/Service/Partition). All required parameters must be populated in order to send to Azure. - :param clean_backup: Required. Boolean flag to delete backups. 
It can be - set to true for deleting all the backups which were created for the backup - entity that is getting disabled for backup. + :param clean_backup: Required. Boolean flag to delete backups. It can be set to true for + deleting all the backups which were created for the backup entity that is getting disabled for + backup. :type clean_backup: bool """ @@ -9274,17 +11226,22 @@ class DisableBackupDescription(Model): 'clean_backup': {'key': 'CleanBackup', 'type': 'bool'}, } - def __init__(self, *, clean_backup: bool, **kwargs) -> None: + def __init__( + self, + *, + clean_backup: bool, + **kwargs + ): super(DisableBackupDescription, self).__init__(**kwargs) self.clean_backup = clean_backup -class DiskInfo(Model): +class DiskInfo(msrest.serialization.Model): """Information about the disk. - :param capacity: the disk size in bytes + :param capacity: the disk size in bytes. :type capacity: str - :param available_space: the available disk space in bytes + :param available_space: the available disk space in bytes. :type available_space: str """ @@ -9293,7 +11250,13 @@ class DiskInfo(Model): 'available_space': {'key': 'AvailableSpace', 'type': 'str'}, } - def __init__(self, *, capacity: str=None, available_space: str=None, **kwargs) -> None: + def __init__( + self, + *, + capacity: Optional[str] = None, + available_space: Optional[str] = None, + **kwargs + ): super(DiskInfo, self).__init__(**kwargs) self.capacity = capacity self.available_space = available_space @@ -9304,8 +11267,10 @@ class DoublePropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". 
+ :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. :type data: float """ @@ -9320,27 +11285,33 @@ class DoublePropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'float'}, } - def __init__(self, *, data: float, **kwargs) -> None: + def __init__( + self, + *, + data: float, + **kwargs + ): super(DoublePropertyValue, self).__init__(**kwargs) + self.kind = 'Double' # type: str self.data = data - self.kind = 'Double' class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Dsms Azure blob store used for storing and - enumerating backups. + """Describes the parameters for Dsms Azure blob store used for storing and enumerating backups. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str - :param storage_credentials_source_location: Required. The source location - of the storage credentials to connect to the Dsms Azure blob store. + :param storage_credentials_source_location: Required. The source location of the storage + credentials to connect to the Dsms Azure blob store. :type storage_credentials_source_location: str - :param container_name: Required. The name of the container in the blob - store to store and enumerate backups from. + :param container_name: Required. The name of the container in the blob store to store and + enumerate backups from. 
:type container_name: str """ @@ -9351,26 +11322,33 @@ class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_credentials_source_location': {'key': 'StorageCredentialsSourceLocation', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__(self, *, storage_credentials_source_location: str, container_name: str, friendly_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + storage_credentials_source_location: str, + container_name: str, + friendly_name: Optional[str] = None, + **kwargs + ): super(DsmsAzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) + self.storage_kind = 'DsmsAzureBlobStore' # type: str self.storage_credentials_source_location = storage_credentials_source_location self.container_name = container_name - self.storage_kind = 'DsmsAzureBlobStore' -class EnableBackupDescription(Model): +class EnableBackupDescription(msrest.serialization.Model): """Specifies the parameters needed to enable periodic backup. All required parameters must be populated in order to send to Azure. - :param backup_policy_name: Required. Name of the backup policy to be used - for enabling periodic backups. + :param backup_policy_name: Required. Name of the backup policy to be used for enabling periodic + backups. 
:type backup_policy_name: str """ @@ -9382,12 +11360,17 @@ class EnableBackupDescription(Model): 'backup_policy_name': {'key': 'BackupPolicyName', 'type': 'str'}, } - def __init__(self, *, backup_policy_name: str, **kwargs) -> None: + def __init__( + self, + *, + backup_policy_name: str, + **kwargs + ): super(EnableBackupDescription, self).__init__(**kwargs) self.backup_policy_name = backup_policy_name -class EndpointProperties(Model): +class EndpointProperties(msrest.serialization.Model): """Describes a container endpoint. All required parameters must be populated in order to send to Azure. @@ -9407,13 +11390,19 @@ class EndpointProperties(Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__(self, *, name: str, port: int=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + port: Optional[int] = None, + **kwargs + ): super(EndpointProperties, self).__init__(**kwargs) self.name = name self.port = port -class EndpointRef(Model): +class EndpointRef(msrest.serialization.Model): """Describes a reference to a service endpoint. :param name: Name of the endpoint. @@ -9424,23 +11413,30 @@ class EndpointRef(Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, *, name: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + **kwargs + ): super(EndpointRef, self).__init__(**kwargs) self.name = name -class SafetyCheck(Model): - """Represents a safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service - and the reliability of the state. +class SafetyCheck(msrest.serialization.Model): + """Represents a safety check performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: PartitionSafetyCheck, SeedNodeSafetyCheck + sub-classes are: SeedNodeSafetyCheck, PartitionSafetyCheck. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind """ _validation = { @@ -9452,30 +11448,32 @@ class SafetyCheck(Model): } _subtype_map = { - 'kind': {'PartitionSafetyCheck': 'PartitionSafetyCheck', 'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck'} + 'kind': {'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck', 'PartitionSafetyCheck': 'PartitionSafetyCheck'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(SafetyCheck, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class PartitionSafetyCheck(SafetyCheck): - """Represents a safety check for the service partition being performed by - service fabric before continuing with operations. + """Represents a safety check for the service partition being performed by service fabric before continuing with operations. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: EnsureAvailabilitySafetyCheck, - EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, - WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, - WaitForReconfigurationSafetyCheck + sub-classes are: EnsureAvailabilitySafetyCheck, EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, WaitForReconfigurationSafetyCheck. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. 
:type partition_id: str """ @@ -9492,23 +11490,29 @@ class PartitionSafetyCheck(SafetyCheck): 'kind': {'EnsureAvailability': 'EnsureAvailabilitySafetyCheck', 'EnsurePartitionQuorum': 'EnsurePartitionQuorumSafetyCheck', 'WaitForInbuildReplica': 'WaitForInbuildReplicaSafetyCheck', 'WaitForPrimaryPlacement': 'WaitForPrimaryPlacementSafetyCheck', 'WaitForPrimarySwap': 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfiguration': 'WaitForReconfigurationSafetyCheck'} } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(PartitionSafetyCheck, self).__init__(**kwargs) + self.kind = 'PartitionSafetyCheck' # type: str self.partition_id = partition_id - self.kind = 'PartitionSafetyCheck' class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): - """Safety check that waits to ensure the availability of the partition. It - waits until there are replicas available such that bringing down this - replica will not cause availability loss for the partition. + """Safety check that waits to ensure the availability of the partition. It waits until there are replicas available such that bringing down this replica will not cause availability loss for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". 
+ :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -9521,21 +11525,28 @@ class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(EnsureAvailabilitySafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'EnsureAvailability' + self.kind = 'EnsureAvailability' # type: str class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): - """Safety check that ensures that a quorum of replicas are not lost for a - partition. + """Safety check that ensures that a quorum of replicas are not lost for a partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. 
:type partition_id: str """ @@ -9548,21 +11559,24 @@ class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(EnsurePartitionQuorumSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'EnsurePartitionQuorum' + self.kind = 'EnsurePartitionQuorum' # type: str -class EntityKindHealthStateCount(Model): +class EntityKindHealthStateCount(msrest.serialization.Model): """Represents health state count for entities of the specified entity kind. - :param entity_kind: The entity kind for which health states are evaluated. - Possible values include: 'Invalid', 'Node', 'Partition', 'Service', - 'Application', 'Replica', 'DeployedApplication', 'DeployedServicePackage', - 'Cluster' + :param entity_kind: The entity kind for which health states are evaluated. Possible values + include: "Invalid", "Node", "Partition", "Service", "Application", "Replica", + "DeployedApplication", "DeployedServicePackage", "Cluster". :type entity_kind: str or ~azure.servicefabric.models.EntityKind - :param health_state_count: The health state count for the entities of the - specified kind. + :param health_state_count: The health state count for the entities of the specified kind. 
:type health_state_count: ~azure.servicefabric.models.HealthStateCount """ @@ -9571,23 +11585,28 @@ class EntityKindHealthStateCount(Model): 'health_state_count': {'key': 'HealthStateCount', 'type': 'HealthStateCount'}, } - def __init__(self, *, entity_kind=None, health_state_count=None, **kwargs) -> None: + def __init__( + self, + *, + entity_kind: Optional[Union[str, "EntityKind"]] = None, + health_state_count: Optional["HealthStateCount"] = None, + **kwargs + ): super(EntityKindHealthStateCount, self).__init__(**kwargs) self.entity_kind = entity_kind self.health_state_count = health_state_count -class EnvironmentVariable(Model): +class EnvironmentVariable(msrest.serialization.Model): """Describes an environment variable for the container. - :param type: The type of the environment variable being given in value. - Possible values include: 'ClearText', 'KeyVaultReference', - 'SecretValueReference'. Default value: "ClearText" . + :param type: The type of the environment variable being given in value. Possible values + include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". :type type: str or ~azure.servicefabric.models.EnvironmentVariableType :param name: The name of the environment variable. :type name: str - :param value: The value of the environment variable, will be processed - based on the type provided. + :param value: The value of the environment variable, will be processed based on the type + provided. 
:type value: str """ @@ -9597,28 +11616,30 @@ class EnvironmentVariable(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, *, type="ClearText", name: str=None, value: str=None, **kwargs) -> None: + def __init__( + self, + *, + type: Optional[Union[str, "EnvironmentVariableType"]] = "ClearText", + name: Optional[str] = None, + value: Optional[str] = None, + **kwargs + ): super(EnvironmentVariable, self).__init__(**kwargs) self.type = type self.name = name self.value = value -class Epoch(Model): - """An Epoch is a configuration number for the partition as a whole. When the - configuration of the replica set changes, for example when the Primary - replica changes, the operations that are replicated from the new Primary - replica are said to be a new Epoch from the ones which were sent by the old - Primary replica. +class Epoch(msrest.serialization.Model): + """An Epoch is a configuration number for the partition as a whole. When the configuration of the replica set changes, for example when the Primary replica changes, the operations that are replicated from the new Primary replica are said to be a new Epoch from the ones which were sent by the old Primary replica. - :param configuration_version: The current configuration number of this - Epoch. The configuration number is an increasing value that is updated - whenever the configuration of this replica set changes. + :param configuration_version: The current configuration number of this Epoch. The configuration + number is an increasing value that is updated whenever the configuration of this replica set + changes. :type configuration_version: str - :param data_loss_version: The current data loss number of this Epoch. The - data loss number property is an increasing value which is updated whenever - data loss is suspected, as when loss of a quorum of replicas in the - replica set that includes the Primary replica. + :param data_loss_version: The current data loss number of this Epoch. 
The data loss number + property is an increasing value which is updated whenever data loss is suspected, as when loss + of a quorum of replicas in the replica set that includes the Primary replica. :type data_loss_version: str """ @@ -9627,38 +11648,46 @@ class Epoch(Model): 'data_loss_version': {'key': 'DataLossVersion', 'type': 'str'}, } - def __init__(self, *, configuration_version: str=None, data_loss_version: str=None, **kwargs) -> None: + def __init__( + self, + *, + configuration_version: Optional[str] = None, + data_loss_version: Optional[str] = None, + **kwargs + ): super(Epoch, self).__init__(**kwargs) self.configuration_version = configuration_version self.data_loss_version = data_loss_version class EventHealthEvaluation(HealthEvaluation): - """Represents health evaluation of a HealthEvent that was reported on the - entity. - The health evaluation is returned when evaluating health of an entity - results in Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation of a HealthEvent that was reported on the entity. +The health evaluation is returned when evaluating health of an entity results in Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. 
Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param consider_warning_as_error: Indicates whether warnings are treated - with the same severity as errors. The field is specified in the health - policy used to evaluate the entity. + :param consider_warning_as_error: Indicates whether warnings are treated with the same severity + as errors. The field is specified in the health policy used to evaluate the entity. :type consider_warning_as_error: bool - :param unhealthy_event: Represents health information reported on a health - entity, such as cluster, application or node, with additional metadata - added by the Health Manager. + :param unhealthy_event: Represents health information reported on a health entity, such as + cluster, application or node, with additional metadata added by the Health Manager. 
:type unhealthy_event: ~azure.servicefabric.models.HealthEvent """ @@ -9667,152 +11696,173 @@ class EventHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, 'unhealthy_event': {'key': 'UnhealthyEvent', 'type': 'HealthEvent'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, consider_warning_as_error: bool=None, unhealthy_event=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + consider_warning_as_error: Optional[bool] = None, + unhealthy_event: Optional["HealthEvent"] = None, + **kwargs + ): super(EventHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Event' # type: str self.consider_warning_as_error = consider_warning_as_error self.unhealthy_event = unhealthy_event - self.kind = 'Event' class ExecutingFaultsChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos has decided on the - faults for an iteration. This Chaos event contains the details of the - faults as a list of strings. + """Describes a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param faults: List of string description of the faults that Chaos decided - to execute in an iteration. 
+ :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param faults: List of string description of the faults that Chaos decided to execute in an + iteration. :type faults: list[str] """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'faults': {'key': 'Faults', 'type': '[str]'}, } - def __init__(self, *, time_stamp_utc, faults=None, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + faults: Optional[List[str]] = None, + **kwargs + ): super(ExecutingFaultsChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.kind = 'ExecutingFaults' # type: str self.faults = faults - self.kind = 'ExecutingFaults' -class ProvisionApplicationTypeDescriptionBase(Model): - """Represents the type of registration or provision requested, and if the - operation needs to be asynchronous or not. Supported types of provision - operations are from either image store or external store. +class ProvisionApplicationTypeDescriptionBase(msrest.serialization.Model): + """Represents the type of registration or provision requested, and if the operation needs to be asynchronous or not. Supported types of provision operations are from either image store or external store. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ProvisionApplicationTypeDescription, - ExternalStoreProvisionApplicationTypeDescription + sub-classes are: ExternalStoreProvisionApplicationTypeDescription, ProvisionApplicationTypeDescription. All required parameters must be populated in order to send to Azure. - :param async_property: Required. Indicates whether or not provisioning - should occur asynchronously. When set to true, the provision operation - returns when the request is accepted by the system, and the provision - operation continues without any timeout limit. The default value is false. - For large application packages, we recommend setting the value to true. + :param kind: Required. The kind of application type registration or provision requested. The + application package can be registered or provisioned either from the image store or from an + external store. Following are the kinds of the application type provision.Constant filled by + server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". + :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind + :param async_property: Required. Indicates whether or not provisioning should occur + asynchronously. When set to true, the provision operation returns when the request is accepted + by the system, and the provision operation continues without any timeout limit. The default + value is false. For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Required. Constant filled by server. 
- :type kind: str """ _validation = { - 'async_property': {'required': True}, 'kind': {'required': True}, + 'async_property': {'required': True}, } _attribute_map = { - 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, } _subtype_map = { - 'kind': {'ImageStorePath': 'ProvisionApplicationTypeDescription', 'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription'} + 'kind': {'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription', 'ImageStorePath': 'ProvisionApplicationTypeDescription'} } - def __init__(self, *, async_property: bool, **kwargs) -> None: + def __init__( + self, + *, + async_property: bool, + **kwargs + ): super(ProvisionApplicationTypeDescriptionBase, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.async_property = async_property - self.kind = None class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to register or provision an application type using - an application package from an external store instead of a package uploaded - to the Service Fabric image store. + """Describes the operation to register or provision an application type using an application package from an external store instead of a package uploaded to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param async_property: Required. Indicates whether or not provisioning - should occur asynchronously. When set to true, the provision operation - returns when the request is accepted by the system, and the provision - operation continues without any timeout limit. The default value is false. - For large application packages, we recommend setting the value to true. + :param kind: Required. The kind of application type registration or provision requested. 
The + application package can be registered or provisioned either from the image store or from an + external store. Following are the kinds of the application type provision.Constant filled by + server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". + :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind + :param async_property: Required. Indicates whether or not provisioning should occur + asynchronously. When set to true, the provision operation returns when the request is accepted + by the system, and the provision operation continues without any timeout limit. The default + value is false. For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_package_download_uri: Required. The path to the - '.sfpkg' application package from where the application package can be - downloaded using HTTP or HTTPS protocols. The application package can be - stored in an external store that provides GET operation to download the - file. Supported protocols are HTTP and HTTPS, and the path must allow READ - access. + :param application_package_download_uri: Required. The path to the '.sfpkg' application package + from where the application package can be downloaded using HTTP or HTTPS protocols. The + application package can be stored in an external store that provides GET operation to download + the file. Supported protocols are HTTP and HTTPS, and the path must allow READ access. :type application_package_download_uri: str - :param application_type_name: Required. The application type name - represents the name of the application type found in the application - manifest. + :param application_type_name: Required. The application type name represents the name of the + application type found in the application manifest. :type application_type_name: str - :param application_type_version: Required. 
The application type version - represents the version of the application type found in the application - manifest. + :param application_type_version: Required. The application type version represents the version + of the application type found in the application manifest. :type application_type_version: str """ _validation = { - 'async_property': {'required': True}, 'kind': {'required': True}, + 'async_property': {'required': True}, 'application_package_download_uri': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { - 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, 'application_package_download_uri': {'key': 'ApplicationPackageDownloadUri', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__(self, *, async_property: bool, application_package_download_uri: str, application_type_name: str, application_type_version: str, **kwargs) -> None: + def __init__( + self, + *, + async_property: bool, + application_package_download_uri: str, + application_type_name: str, + application_type_version: str, + **kwargs + ): super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(async_property=async_property, **kwargs) + self.kind = 'ExternalStore' # type: str self.application_package_download_uri = application_package_download_uri self.application_type_name = application_type_name self.application_type_version = application_type_version - self.kind = 'ExternalStore' -class FabricCodeVersionInfo(Model): +class FabricCodeVersionInfo(msrest.serialization.Model): """Information about a Service Fabric code version. :param code_version: The product version of Service Fabric. 
@@ -9823,12 +11873,17 @@ class FabricCodeVersionInfo(Model): 'code_version': {'key': 'CodeVersion', 'type': 'str'}, } - def __init__(self, *, code_version: str=None, **kwargs) -> None: + def __init__( + self, + *, + code_version: Optional[str] = None, + **kwargs + ): super(FabricCodeVersionInfo, self).__init__(**kwargs) self.code_version = code_version -class FabricConfigVersionInfo(Model): +class FabricConfigVersionInfo(msrest.serialization.Model): """Information about a Service Fabric config version. :param config_version: The config version of Service Fabric. @@ -9839,20 +11894,22 @@ class FabricConfigVersionInfo(Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__(self, *, config_version: str=None, **kwargs) -> None: + def __init__( + self, + *, + config_version: Optional[str] = None, + **kwargs + ): super(FabricConfigVersionInfo, self).__init__(**kwargs) self.config_version = config_version -class FabricError(Model): - """The REST API operations for Service Fabric return standard HTTP status - codes. This type defines the additional information returned from the - Service Fabric API operations that are not successful. +class FabricError(msrest.serialization.Model): + """The REST API operations for Service Fabric return standard HTTP status codes. This type defines the additional information returned from the Service Fabric API operations that are not successful. All required parameters must be populated in order to send to Azure. - :param error: Required. Error object containing error code and error - message. + :param error: Required. Error object containing error code and error message. 
:type error: ~azure.servicefabric.models.FabricErrorError """ @@ -9864,184 +11921,184 @@ class FabricError(Model): 'error': {'key': 'Error', 'type': 'FabricErrorError'}, } - def __init__(self, *, error, **kwargs) -> None: + def __init__( + self, + *, + error: "FabricErrorError", + **kwargs + ): super(FabricError, self).__init__(**kwargs) self.error = error -class FabricErrorException(HttpOperationError): - """Server responsed with exception of type: 'FabricError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args) - - -class FabricErrorError(Model): +class FabricErrorError(msrest.serialization.Model): """Error object containing error code and error message. All required parameters must be populated in order to send to Azure. - :param code: Required. Defines the fabric error codes that be returned as - part of the error object in response to Service Fabric API operations that - are not successful. Following are the error code values that can be - returned for a specific HTTP status code. 
- - Possible values of the error code for HTTP status code 400 (Bad Request) - - "FABRIC_E_INVALID_PARTITION_KEY" - - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - - "FABRIC_E_INVALID_ADDRESS" - - "FABRIC_E_APPLICATION_NOT_UPGRADING" - - "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - - "FABRIC_E_FABRIC_NOT_UPGRADING" - - "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - - "FABRIC_E_INVALID_CONFIGURATION" - - "FABRIC_E_INVALID_NAME_URI" - - "FABRIC_E_PATH_TOO_LONG" - - "FABRIC_E_KEY_TOO_LARGE" - - "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - - "FABRIC_E_INVALID_ATOMIC_GROUP" - - "FABRIC_E_VALUE_EMPTY" - - "FABRIC_E_BACKUP_IS_ENABLED" - - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - - "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - - "E_INVALIDARG" - - Possible values of the error code for HTTP status code 404 (Not Found) - - "FABRIC_E_NODE_NOT_FOUND" - - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - - "FABRIC_E_APPLICATION_NOT_FOUND" - - "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - - "FABRIC_E_SERVICE_DOES_NOT_EXIST" - - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - - "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - - "FABRIC_E_PARTITION_NOT_FOUND" - - "FABRIC_E_REPLICA_DOES_NOT_EXIST" - - "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - - "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - - "FABRIC_E_DIRECTORY_NOT_FOUND" - - "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - - "FABRIC_E_FILE_NOT_FOUND" - - "FABRIC_E_NAME_DOES_NOT_EXIST" - - "FABRIC_E_PROPERTY_DOES_NOT_EXIST" - - "FABRIC_E_ENUMERATION_COMPLETED" - - "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - - "FABRIC_E_KEY_NOT_FOUND" - - "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - - "FABRIC_E_BACKUP_NOT_ENABLED" - - "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - - "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - - Possible values of the error code for HTTP status code 409 (Conflict) - - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - - "FABRIC_E_APPLICATION_ALREADY_EXISTS" - 
- "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - - "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - - "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - - "FABRIC_E_SERVICE_ALREADY_EXISTS" - - "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - - "FABRIC_E_APPLICATION_TYPE_IN_USE" - - "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - - "FABRIC_E_FABRIC_VERSION_IN_USE" - - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - - "FABRIC_E_NAME_ALREADY_EXISTS" - - "FABRIC_E_NAME_NOT_EMPTY" - - "FABRIC_E_PROPERTY_CHECK_FAILED" - - "FABRIC_E_SERVICE_METADATA_MISMATCH" - - "FABRIC_E_SERVICE_TYPE_MISMATCH" - - "FABRIC_E_HEALTH_STALE_REPORT" - - "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - - "FABRIC_E_INSTANCE_ID_MISMATCH" - - "FABRIC_E_BACKUP_IN_PROGRESS" - - "FABRIC_E_RESTORE_IN_PROGRESS" - - "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - - Possible values of the error code for HTTP status code 413 (Request - Entity Too Large) - - "FABRIC_E_VALUE_TOO_LARGE" - - Possible values of the error code for HTTP status code 500 (Internal - Server Error) - - "FABRIC_E_NODE_IS_UP" - - "E_FAIL" - - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - - "FABRIC_E_VOLUME_ALREADY_EXISTS" - - "FABRIC_E_VOLUME_NOT_FOUND" - - "SerializationError" - - Possible values of the error code for HTTP status code 503 (Service - Unavailable) - - "FABRIC_E_NO_WRITE_QUORUM" - - "FABRIC_E_NOT_PRIMARY" - - "FABRIC_E_NOT_READY" - - "FABRIC_E_RECONFIGURATION_PENDING" - - "FABRIC_E_SERVICE_OFFLINE" - - "E_ABORT" - - "FABRIC_E_VALUE_TOO_LARGE" - - Possible values of the error code for HTTP status code 504 (Gateway - Timeout) - - "FABRIC_E_COMMUNICATION_ERROR" - - "FABRIC_E_OPERATION_NOT_COMPLETE" - - "FABRIC_E_TIMEOUT". 
Possible values include: - 'FABRIC_E_INVALID_PARTITION_KEY', - 'FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR', 'FABRIC_E_INVALID_ADDRESS', - 'FABRIC_E_APPLICATION_NOT_UPGRADING', - 'FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR', - 'FABRIC_E_FABRIC_NOT_UPGRADING', - 'FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR', - 'FABRIC_E_INVALID_CONFIGURATION', 'FABRIC_E_INVALID_NAME_URI', - 'FABRIC_E_PATH_TOO_LONG', 'FABRIC_E_KEY_TOO_LARGE', - 'FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED', - 'FABRIC_E_INVALID_ATOMIC_GROUP', 'FABRIC_E_VALUE_EMPTY', - 'FABRIC_E_NODE_NOT_FOUND', 'FABRIC_E_APPLICATION_TYPE_NOT_FOUND', - 'FABRIC_E_APPLICATION_NOT_FOUND', 'FABRIC_E_SERVICE_TYPE_NOT_FOUND', - 'FABRIC_E_SERVICE_DOES_NOT_EXIST', - 'FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND', - 'FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND', - 'FABRIC_E_PARTITION_NOT_FOUND', 'FABRIC_E_REPLICA_DOES_NOT_EXIST', - 'FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST', - 'FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND', - 'FABRIC_E_DIRECTORY_NOT_FOUND', 'FABRIC_E_FABRIC_VERSION_NOT_FOUND', - 'FABRIC_E_FILE_NOT_FOUND', 'FABRIC_E_NAME_DOES_NOT_EXIST', - 'FABRIC_E_PROPERTY_DOES_NOT_EXIST', 'FABRIC_E_ENUMERATION_COMPLETED', - 'FABRIC_E_SERVICE_MANIFEST_NOT_FOUND', 'FABRIC_E_KEY_NOT_FOUND', - 'FABRIC_E_HEALTH_ENTITY_NOT_FOUND', - 'FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS', - 'FABRIC_E_APPLICATION_ALREADY_EXISTS', - 'FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION', - 'FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS', - 'FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS', - 'FABRIC_E_SERVICE_ALREADY_EXISTS', - 'FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS', - 'FABRIC_E_APPLICATION_TYPE_IN_USE', - 'FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION', - 'FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS', - 'FABRIC_E_FABRIC_VERSION_IN_USE', 'FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS', - 'FABRIC_E_NAME_ALREADY_EXISTS', 'FABRIC_E_NAME_NOT_EMPTY', - 'FABRIC_E_PROPERTY_CHECK_FAILED', 'FABRIC_E_SERVICE_METADATA_MISMATCH', - 'FABRIC_E_SERVICE_TYPE_MISMATCH', 'FABRIC_E_HEALTH_STALE_REPORT', - 
'FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED', - 'FABRIC_E_NODE_HAS_NOT_STOPPED_YET', 'FABRIC_E_INSTANCE_ID_MISMATCH', - 'FABRIC_E_VALUE_TOO_LARGE', 'FABRIC_E_NO_WRITE_QUORUM', - 'FABRIC_E_NOT_PRIMARY', 'FABRIC_E_NOT_READY', - 'FABRIC_E_RECONFIGURATION_PENDING', 'FABRIC_E_SERVICE_OFFLINE', 'E_ABORT', - 'FABRIC_E_COMMUNICATION_ERROR', 'FABRIC_E_OPERATION_NOT_COMPLETE', - 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP', 'E_FAIL', - 'FABRIC_E_BACKUP_IS_ENABLED', - 'FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH', - 'FABRIC_E_INVALID_FOR_STATELESS_SERVICES', 'FABRIC_E_BACKUP_NOT_ENABLED', - 'FABRIC_E_BACKUP_POLICY_NOT_EXISTING', - 'FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING', - 'FABRIC_E_BACKUP_IN_PROGRESS', 'FABRIC_E_RESTORE_IN_PROGRESS', - 'FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING', - 'FABRIC_E_INVALID_SERVICE_SCALING_POLICY', 'E_INVALIDARG', - 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS', - 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND', - 'FABRIC_E_VOLUME_ALREADY_EXISTS', 'FABRIC_E_VOLUME_NOT_FOUND', - 'SerializationError', 'FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR' + :param code: Required. Defines the fabric error codes that be returned as part of the error + object in response to Service Fabric API operations that are not successful. Following are the + error code values that can be returned for a specific HTTP status code. 
+ + + * + Possible values of the error code for HTTP status code 400 (Bad Request) + + + * "FABRIC_E_INVALID_PARTITION_KEY" + * "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + * "FABRIC_E_INVALID_ADDRESS" + * "FABRIC_E_APPLICATION_NOT_UPGRADING" + * "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + * "FABRIC_E_FABRIC_NOT_UPGRADING" + * "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + * "FABRIC_E_INVALID_CONFIGURATION" + * "FABRIC_E_INVALID_NAME_URI" + * "FABRIC_E_PATH_TOO_LONG" + * "FABRIC_E_KEY_TOO_LARGE" + * "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + * "FABRIC_E_INVALID_ATOMIC_GROUP" + * "FABRIC_E_VALUE_EMPTY" + * "FABRIC_E_BACKUP_IS_ENABLED" + * "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + * "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + * "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + * "E_INVALIDARG" + + * + Possible values of the error code for HTTP status code 404 (Not Found) + + + * "FABRIC_E_NODE_NOT_FOUND" + * "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + * "FABRIC_E_APPLICATION_NOT_FOUND" + * "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + * "FABRIC_E_SERVICE_DOES_NOT_EXIST" + * "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + * "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + * "FABRIC_E_PARTITION_NOT_FOUND" + * "FABRIC_E_REPLICA_DOES_NOT_EXIST" + * "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + * "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + * "FABRIC_E_DIRECTORY_NOT_FOUND" + * "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + * "FABRIC_E_FILE_NOT_FOUND" + * "FABRIC_E_NAME_DOES_NOT_EXIST" + * "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + * "FABRIC_E_ENUMERATION_COMPLETED" + * "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + * "FABRIC_E_KEY_NOT_FOUND" + * "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + * "FABRIC_E_BACKUP_NOT_ENABLED" + * "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + * "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + * "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + + * + Possible values of the error code for HTTP status code 409 (Conflict) + + + * "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + * 
"FABRIC_E_APPLICATION_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + * "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + * "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + * "FABRIC_E_SERVICE_ALREADY_EXISTS" + * "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_TYPE_IN_USE" + * "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + * "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + * "FABRIC_E_FABRIC_VERSION_IN_USE" + * "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + * "FABRIC_E_NAME_ALREADY_EXISTS" + * "FABRIC_E_NAME_NOT_EMPTY" + * "FABRIC_E_PROPERTY_CHECK_FAILED" + * "FABRIC_E_SERVICE_METADATA_MISMATCH" + * "FABRIC_E_SERVICE_TYPE_MISMATCH" + * "FABRIC_E_HEALTH_STALE_REPORT" + * "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + * "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + * "FABRIC_E_INSTANCE_ID_MISMATCH" + * "FABRIC_E_BACKUP_IN_PROGRESS" + * "FABRIC_E_RESTORE_IN_PROGRESS" + * "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + + * + Possible values of the error code for HTTP status code 413 (Request Entity Too Large) + + + * "FABRIC_E_VALUE_TOO_LARGE" + + * + Possible values of the error code for HTTP status code 500 (Internal Server Error) + + + * "FABRIC_E_NODE_IS_UP" + * "E_FAIL" + * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + * "FABRIC_E_VOLUME_ALREADY_EXISTS" + * "FABRIC_E_VOLUME_NOT_FOUND" + * "SerializationError" + + * + Possible values of the error code for HTTP status code 503 (Service Unavailable) + + + * "FABRIC_E_NO_WRITE_QUORUM" + * "FABRIC_E_NOT_PRIMARY" + * "FABRIC_E_NOT_READY" + * "FABRIC_E_RECONFIGURATION_PENDING" + * "FABRIC_E_SERVICE_OFFLINE" + * "E_ABORT" + * "FABRIC_E_VALUE_TOO_LARGE" + + * + Possible values of the error code for HTTP status code 504 (Gateway Timeout) + + + * "FABRIC_E_COMMUNICATION_ERROR" + * "FABRIC_E_OPERATION_NOT_COMPLETE" + * "FABRIC_E_TIMEOUT". 
Possible values include: "FABRIC_E_INVALID_PARTITION_KEY", + "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR", "FABRIC_E_INVALID_ADDRESS", + "FABRIC_E_APPLICATION_NOT_UPGRADING", "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR", + "FABRIC_E_FABRIC_NOT_UPGRADING", "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR", + "FABRIC_E_INVALID_CONFIGURATION", "FABRIC_E_INVALID_NAME_URI", "FABRIC_E_PATH_TOO_LONG", + "FABRIC_E_KEY_TOO_LARGE", "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED", + "FABRIC_E_INVALID_ATOMIC_GROUP", "FABRIC_E_VALUE_EMPTY", "FABRIC_E_NODE_NOT_FOUND", + "FABRIC_E_APPLICATION_TYPE_NOT_FOUND", "FABRIC_E_APPLICATION_NOT_FOUND", + "FABRIC_E_SERVICE_TYPE_NOT_FOUND", "FABRIC_E_SERVICE_DOES_NOT_EXIST", + "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND", "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND", + "FABRIC_E_PARTITION_NOT_FOUND", "FABRIC_E_REPLICA_DOES_NOT_EXIST", + "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST", "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND", + "FABRIC_E_DIRECTORY_NOT_FOUND", "FABRIC_E_FABRIC_VERSION_NOT_FOUND", "FABRIC_E_FILE_NOT_FOUND", + "FABRIC_E_NAME_DOES_NOT_EXIST", "FABRIC_E_PROPERTY_DOES_NOT_EXIST", + "FABRIC_E_ENUMERATION_COMPLETED", "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND", + "FABRIC_E_KEY_NOT_FOUND", "FABRIC_E_HEALTH_ENTITY_NOT_FOUND", + "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS", "FABRIC_E_APPLICATION_ALREADY_EXISTS", + "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION", + "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS", "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS", + "FABRIC_E_SERVICE_ALREADY_EXISTS", "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS", + "FABRIC_E_APPLICATION_TYPE_IN_USE", "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION", + "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS", "FABRIC_E_FABRIC_VERSION_IN_USE", + "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS", "FABRIC_E_NAME_ALREADY_EXISTS", + "FABRIC_E_NAME_NOT_EMPTY", "FABRIC_E_PROPERTY_CHECK_FAILED", + "FABRIC_E_SERVICE_METADATA_MISMATCH", "FABRIC_E_SERVICE_TYPE_MISMATCH", + "FABRIC_E_HEALTH_STALE_REPORT", 
"FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED", + "FABRIC_E_NODE_HAS_NOT_STOPPED_YET", "FABRIC_E_INSTANCE_ID_MISMATCH", + "FABRIC_E_VALUE_TOO_LARGE", "FABRIC_E_NO_WRITE_QUORUM", "FABRIC_E_NOT_PRIMARY", + "FABRIC_E_NOT_READY", "FABRIC_E_RECONFIGURATION_PENDING", "FABRIC_E_SERVICE_OFFLINE", + "E_ABORT", "FABRIC_E_COMMUNICATION_ERROR", "FABRIC_E_OPERATION_NOT_COMPLETE", + "FABRIC_E_TIMEOUT", "FABRIC_E_NODE_IS_UP", "E_FAIL", "FABRIC_E_BACKUP_IS_ENABLED", + "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH", "FABRIC_E_INVALID_FOR_STATELESS_SERVICES", + "FABRIC_E_BACKUP_NOT_ENABLED", "FABRIC_E_BACKUP_POLICY_NOT_EXISTING", + "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING", "FABRIC_E_BACKUP_IN_PROGRESS", + "FABRIC_E_RESTORE_IN_PROGRESS", "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING", + "FABRIC_E_INVALID_SERVICE_SCALING_POLICY", "E_INVALIDARG", + "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS", + "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND", "FABRIC_E_VOLUME_ALREADY_EXISTS", + "FABRIC_E_VOLUME_NOT_FOUND", "SerializationError", + "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR". :type code: str or ~azure.servicefabric.models.FabricErrorCodes :param message: Error message. :type message: str @@ -10056,22 +12113,30 @@ class FabricErrorError(Model): 'message': {'key': 'Message', 'type': 'str'}, } - def __init__(self, *, code, message: str=None, **kwargs) -> None: + def __init__( + self, + *, + code: Union[str, "FabricErrorCodes"], + message: Optional[str] = None, + **kwargs + ): super(FabricErrorError, self).__init__(**kwargs) self.code = code self.message = message -class PropertyBatchInfo(Model): +class PropertyBatchInfo(msrest.serialization.Model): """Information about the results of a property batch. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SuccessfulPropertyBatchInfo, FailedPropertyBatchInfo + sub-classes are: FailedPropertyBatchInfo, SuccessfulPropertyBatchInfo. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property batch info, determined by the results of a property + batch. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Successful", "Failed". + :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind """ _validation = { @@ -10083,28 +12148,30 @@ class PropertyBatchInfo(Model): } _subtype_map = { - 'kind': {'Successful': 'SuccessfulPropertyBatchInfo', 'Failed': 'FailedPropertyBatchInfo'} + 'kind': {'Failed': 'FailedPropertyBatchInfo', 'Successful': 'SuccessfulPropertyBatchInfo'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(PropertyBatchInfo, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class FailedPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch failing. - Contains information about the specific batch failure. + """Derived from PropertyBatchInfo. Represents the property batch failing. Contains information about the specific batch failure. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param error_message: The error message of the failed operation. Describes - the exception thrown due to the first unsuccessful operation in the - property batch. + :param kind: Required. The kind of property batch info, determined by the results of a property + batch. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Successful", "Failed". + :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind + :param error_message: The error message of the failed operation. 
Describes the exception thrown + due to the first unsuccessful operation in the property batch. :type error_message: str - :param operation_index: The index of the unsuccessful operation in the - property batch. + :param operation_index: The index of the unsuccessful operation in the property batch. :type operation_index: int """ @@ -10118,23 +12185,26 @@ class FailedPropertyBatchInfo(PropertyBatchInfo): 'operation_index': {'key': 'OperationIndex', 'type': 'int'}, } - def __init__(self, *, error_message: str=None, operation_index: int=None, **kwargs) -> None: + def __init__( + self, + *, + error_message: Optional[str] = None, + operation_index: Optional[int] = None, + **kwargs + ): super(FailedPropertyBatchInfo, self).__init__(**kwargs) + self.kind = 'Failed' # type: str self.error_message = error_message self.operation_index = operation_index - self.kind = 'Failed' -class FailedUpgradeDomainProgressObject(Model): - """The detailed upgrade progress for nodes in the current upgrade domain at - the point of failure. +class FailedUpgradeDomainProgressObject(msrest.serialization.Model): + """The detailed upgrade progress for nodes in the current upgrade domain at the point of failure. - :param domain_name: The name of the upgrade domain + :param domain_name: The name of the upgrade domain. :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their - statuses - :type node_upgrade_progress_list: - list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
+ :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -10142,22 +12212,25 @@ class FailedUpgradeDomainProgressObject(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + node_upgrade_progress_list: Optional[List["NodeUpgradeProgressInfo"]] = None, + **kwargs + ): super(FailedUpgradeDomainProgressObject, self).__init__(**kwargs) self.domain_name = domain_name self.node_upgrade_progress_list = node_upgrade_progress_list -class FailureUpgradeDomainProgressInfo(Model): - """Information about the upgrade domain progress at the time of upgrade - failure. +class FailureUpgradeDomainProgressInfo(msrest.serialization.Model): + """Information about the upgrade domain progress at the time of upgrade failure. - :param domain_name: The name of the upgrade domain + :param domain_name: The name of the upgrade domain. :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their - statuses - :type node_upgrade_progress_list: - list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
+ :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -10165,24 +12238,28 @@ class FailureUpgradeDomainProgressInfo(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + node_upgrade_progress_list: Optional[List["NodeUpgradeProgressInfo"]] = None, + **kwargs + ): super(FailureUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = domain_name self.node_upgrade_progress_list = node_upgrade_progress_list -class FileInfo(Model): +class FileInfo(msrest.serialization.Model): """Information about a image store file. :param file_size: The size of file in bytes. :type file_size: str :param file_version: Information about the version of image store file. :type file_version: ~azure.servicefabric.models.FileVersion - :param modified_date: The date and time when the image store file was last - modified. - :type modified_date: datetime - :param store_relative_path: The file path relative to the image store root - path. + :param modified_date: The date and time when the image store file was last modified. + :type modified_date: ~datetime.datetime + :param store_relative_path: The file path relative to the image store root path. 
:type store_relative_path: str """ @@ -10193,7 +12270,15 @@ class FileInfo(Model): 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, } - def __init__(self, *, file_size: str=None, file_version=None, modified_date=None, store_relative_path: str=None, **kwargs) -> None: + def __init__( + self, + *, + file_size: Optional[str] = None, + file_version: Optional["FileVersion"] = None, + modified_date: Optional[datetime.datetime] = None, + store_relative_path: Optional[str] = None, + **kwargs + ): super(FileInfo, self).__init__(**kwargs) self.file_size = file_size self.file_version = file_version @@ -10202,17 +12287,17 @@ def __init__(self, *, file_size: str=None, file_version=None, modified_date=None class FileShareBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for file share storage used for storing or - enumerating backups. + """Describes the parameters for file share storage used for storing or enumerating backups. All required parameters must be populated in order to send to Azure. + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_kind: Required. Constant filled by server. - :type storage_kind: str - :param path: Required. UNC path of the file share where to store or - enumerate backups from. + :param path: Required. UNC path of the file share where to store or enumerate backups from. :type path: str :param primary_user_name: Primary user name to access the file share. 
:type primary_user_name: str @@ -10220,7 +12305,7 @@ class FileShareBackupStorageDescription(BackupStorageDescription): :type primary_password: str :param secondary_user_name: Secondary user name to access the file share. :type secondary_user_name: str - :param secondary_password: Secondary password to access the share location + :param secondary_password: Secondary password to access the share location. :type secondary_password: str """ @@ -10230,8 +12315,8 @@ class FileShareBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, 'path': {'key': 'Path', 'type': 'str'}, 'primary_user_name': {'key': 'PrimaryUserName', 'type': 'str'}, 'primary_password': {'key': 'PrimaryPassword', 'type': 'str'}, @@ -10239,27 +12324,37 @@ class FileShareBackupStorageDescription(BackupStorageDescription): 'secondary_password': {'key': 'SecondaryPassword', 'type': 'str'}, } - def __init__(self, *, path: str, friendly_name: str=None, primary_user_name: str=None, primary_password: str=None, secondary_user_name: str=None, secondary_password: str=None, **kwargs) -> None: + def __init__( + self, + *, + path: str, + friendly_name: Optional[str] = None, + primary_user_name: Optional[str] = None, + primary_password: Optional[str] = None, + secondary_user_name: Optional[str] = None, + secondary_password: Optional[str] = None, + **kwargs + ): super(FileShareBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) + self.storage_kind = 'FileShare' # type: str self.path = path self.primary_user_name = primary_user_name self.primary_password = primary_password self.secondary_user_name = secondary_user_name self.secondary_password = secondary_password - self.storage_kind = 'FileShare' -class FileVersion(Model): +class FileVersion(msrest.serialization.Model): """Information about the version 
of image store file. - :param version_number: The current image store version number for the file - is used in image store for checking whether it need to be updated. + :param version_number: The current image store version number for the file is used in image + store for checking whether it need to be updated. :type version_number: str - :param epoch_data_loss_number: The epoch data loss number of image store - replica when this file entry was updated or created. + :param epoch_data_loss_number: The epoch data loss number of image store replica when this file + entry was updated or created. :type epoch_data_loss_number: str - :param epoch_configuration_number: The epoch configuration version number - of the image store replica when this file entry was created or updated. + :param epoch_configuration_number: The epoch configuration version number of the image store + replica when this file entry was created or updated. :type epoch_configuration_number: str """ @@ -10269,19 +12364,25 @@ class FileVersion(Model): 'epoch_configuration_number': {'key': 'EpochConfigurationNumber', 'type': 'str'}, } - def __init__(self, *, version_number: str=None, epoch_data_loss_number: str=None, epoch_configuration_number: str=None, **kwargs) -> None: + def __init__( + self, + *, + version_number: Optional[str] = None, + epoch_data_loss_number: Optional[str] = None, + epoch_configuration_number: Optional[str] = None, + **kwargs + ): super(FileVersion, self).__init__(**kwargs) self.version_number = version_number self.epoch_data_loss_number = epoch_data_loss_number self.epoch_configuration_number = epoch_configuration_number -class FolderInfo(Model): - """Information about a image store folder. It includes how many files this - folder contains and its image store relative path. +class FolderInfo(msrest.serialization.Model): + """Information about a image store folder. It includes how many files this folder contains and its image store relative path. 
- :param store_relative_path: The remote location within image store. This - path is relative to the image store root. + :param store_relative_path: The remote location within image store. This path is relative to + the image store root. :type store_relative_path: str :param file_count: The number of files from within the image store folder. :type file_count: str @@ -10292,17 +12393,23 @@ class FolderInfo(Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__(self, *, store_relative_path: str=None, file_count: str=None, **kwargs) -> None: + def __init__( + self, + *, + store_relative_path: Optional[str] = None, + file_count: Optional[str] = None, + **kwargs + ): super(FolderInfo, self).__init__(**kwargs) self.store_relative_path = store_relative_path self.file_count = file_count -class FolderSizeInfo(Model): +class FolderSizeInfo(msrest.serialization.Model): """Information of a image store folder size. - :param store_relative_path: The remote location within image store. This - path is relative to the image store root. + :param store_relative_path: The remote location within image store. This path is relative to + the image store root. :type store_relative_path: str :param folder_size: The size of folder in bytes. :type folder_size: str @@ -10313,7 +12420,13 @@ class FolderSizeInfo(Model): 'folder_size': {'key': 'FolderSize', 'type': 'str'}, } - def __init__(self, *, store_relative_path: str=None, folder_size: str=None, **kwargs) -> None: + def __init__( + self, + *, + store_relative_path: Optional[str] = None, + folder_size: Optional[str] = None, + **kwargs + ): super(FolderSizeInfo, self).__init__(**kwargs) self.store_relative_path = store_relative_path self.folder_size = folder_size @@ -10324,12 +12437,14 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. Constant filled by server. 
- :type schedule_kind: str - :param interval: Required. Defines the interval with which backups are - periodically taken. It should be specified in ISO8601 format. Timespan in - seconds is not supported and will be ignored while creating the policy. - :type interval: timedelta + :param schedule_kind: Required. The kind of backup schedule, time based or frequency + based.Constant filled by server. Possible values include: "Invalid", "TimeBased", + "FrequencyBased". + :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind + :param interval: Required. Defines the interval with which backups are periodically taken. It + should be specified in ISO8601 format. Timespan in seconds is not supported and will be ignored + while creating the policy. + :type interval: ~datetime.timedelta """ _validation = { @@ -10342,19 +12457,23 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): 'interval': {'key': 'Interval', 'type': 'duration'}, } - def __init__(self, *, interval, **kwargs) -> None: + def __init__( + self, + *, + interval: datetime.timedelta, + **kwargs + ): super(FrequencyBasedBackupScheduleDescription, self).__init__(**kwargs) + self.schedule_kind = 'FrequencyBased' # type: str self.interval = interval - self.schedule_kind = 'FrequencyBased' -class GatewayDestination(Model): +class GatewayDestination(msrest.serialization.Model): """Describes destination endpoint for routing traffic. All required parameters must be populated in order to send to Azure. - :param application_name: Required. Name of the service fabric Mesh - application. + :param application_name: Required. Name of the service fabric Mesh application. :type application_name: str :param service_name: Required. service that contains the endpoint. 
:type service_name: str @@ -10374,18 +12493,24 @@ class GatewayDestination(Model): 'endpoint_name': {'key': 'endpointName', 'type': 'str'}, } - def __init__(self, *, application_name: str, service_name: str, endpoint_name: str, **kwargs) -> None: + def __init__( + self, + *, + application_name: str, + service_name: str, + endpoint_name: str, + **kwargs + ): super(GatewayDestination, self).__init__(**kwargs) self.application_name = application_name self.service_name = service_name self.endpoint_name = endpoint_name -class GatewayResourceDescription(Model): +class GatewayResourceDescription(msrest.serialization.Model): """This type describes a gateway resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. @@ -10393,24 +12518,21 @@ class GatewayResourceDescription(Model): :type name: str :param description: User readable description of the gateway. :type description: str - :param source_network: Required. Network the gateway should listen on for - requests. + :param source_network: Required. Network the gateway should listen on for requests. :type source_network: ~azure.servicefabric.models.NetworkRef - :param destination_network: Required. Network that the Application is - using. + :param destination_network: Required. Network that the Application is using. :type destination_network: ~azure.servicefabric.models.NetworkRef :param tcp: Configuration for tcp connectivity for this gateway. :type tcp: list[~azure.servicefabric.models.TcpConfig] :param http: Configuration for http connectivity for this gateway. :type http: list[~azure.servicefabric.models.HttpConfig] - :ivar status: Status of the resource. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the resource. 
Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the gateway. + :ivar status_details: Gives additional information about the current status of the gateway. :vartype status_details: str - :ivar ip_address: IP address of the gateway. This is populated in the - response and is ignored for incoming requests. + :ivar ip_address: IP address of the gateway. This is populated in the response and is ignored + for incoming requests. :vartype ip_address: str """ @@ -10435,7 +12557,17 @@ class GatewayResourceDescription(Model): 'ip_address': {'key': 'properties.ipAddress', 'type': 'str'}, } - def __init__(self, *, name: str, source_network, destination_network, description: str=None, tcp=None, http=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + source_network: "NetworkRef", + destination_network: "NetworkRef", + description: Optional[str] = None, + tcp: Optional[List["TcpConfig"]] = None, + http: Optional[List["HttpConfig"]] = None, + **kwargs + ): super(GatewayResourceDescription, self).__init__(**kwargs) self.name = name self.description = description @@ -10448,33 +12580,27 @@ def __init__(self, *, name: str, source_network, destination_network, descriptio self.ip_address = None -class GetBackupByStorageQueryDescription(Model): - """Describes additional filters to be applied, while listing backups, and - backup storage details from where to fetch the backups. +class GetBackupByStorageQueryDescription(msrest.serialization.Model): + """Describes additional filters to be applied, while listing backups, and backup storage details from where to fetch the backups. All required parameters must be populated in order to send to Azure. - :param start_date_time_filter: Specifies the start date time in ISO8601 - from which to enumerate backups. 
If not specified, backups are enumerated - from the beginning. - :type start_date_time_filter: datetime - :param end_date_time_filter: Specifies the end date time in ISO8601 till - which to enumerate backups. If not specified, backups are enumerated till - the end. - :type end_date_time_filter: datetime - :param latest: If specified as true, gets the most recent backup (within - the specified time range) for every partition under the specified backup - entity. Default value: False . + :param start_date_time_filter: Specifies the start date time in ISO8601 from which to enumerate + backups. If not specified, backups are enumerated from the beginning. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specifies the end date time in ISO8601 till which to enumerate + backups. If not specified, backups are enumerated till the end. + :type end_date_time_filter: ~datetime.datetime + :param latest: If specified as true, gets the most recent backup (within the specified time + range) for every partition under the specified backup entity. :type latest: bool - :param storage: Required. Describes the parameters for the backup storage - from where to enumerate backups. This is optional and by default backups - are enumerated from the backup storage where this backup entity is - currently being backed up (as specified in backup policy). This parameter - is useful to be able to enumerate backups from another cluster where you - may intend to restore. + :param storage: Required. Describes the parameters for the backup storage from where to + enumerate backups. This is optional and by default backups are enumerated from the backup + storage where this backup entity is currently being backed up (as specified in backup policy). + This parameter is useful to be able to enumerate backups from another cluster where you may + intend to restore. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param backup_entity: Required. 
Indicates the entity for which to - enumerate backups. + :param backup_entity: Required. Indicates the entity for which to enumerate backups. :type backup_entity: ~azure.servicefabric.models.BackupEntity """ @@ -10491,7 +12617,16 @@ class GetBackupByStorageQueryDescription(Model): 'backup_entity': {'key': 'BackupEntity', 'type': 'BackupEntity'}, } - def __init__(self, *, storage, backup_entity, start_date_time_filter=None, end_date_time_filter=None, latest: bool=False, **kwargs) -> None: + def __init__( + self, + *, + storage: "BackupStorageDescription", + backup_entity: "BackupEntity", + start_date_time_filter: Optional[datetime.datetime] = None, + end_date_time_filter: Optional[datetime.datetime] = None, + latest: Optional[bool] = False, + **kwargs + ): super(GetBackupByStorageQueryDescription, self).__init__(**kwargs) self.start_date_time_filter = start_date_time_filter self.end_date_time_filter = end_date_time_filter @@ -10501,39 +12636,44 @@ def __init__(self, *, storage, backup_entity, start_date_time_filter=None, end_d class GetPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that gets the specified property if it - exists. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that gets the specified property if it exists. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". 
+ :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str - :param include_value: Whether or not to return the property value with the - metadata. - True if values should be returned with the metadata; False to return only - property metadata. Default value: False . + :param include_value: Whether or not to return the property value with the metadata. + True if values should be returned with the metadata; False to return only property metadata. :type include_value: bool """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'include_value': {'key': 'IncludeValue', 'type': 'bool'}, } - def __init__(self, *, property_name: str, include_value: bool=False, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + include_value: Optional[bool] = False, + **kwargs + ): super(GetPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.kind = 'Get' # type: str self.include_value = include_value - self.kind = 'Get' class GuidPropertyValue(PropertyValue): @@ -10541,8 +12681,10 @@ class GuidPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. 
:type data: str """ @@ -10557,18 +12699,22 @@ class GuidPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, *, data: str, **kwargs) -> None: + def __init__( + self, + *, + data: str, + **kwargs + ): super(GuidPropertyValue, self).__init__(**kwargs) + self.kind = 'Guid' # type: str self.data = data - self.kind = 'Guid' -class HealthEvaluationWrapper(Model): +class HealthEvaluationWrapper(msrest.serialization.Model): """Wrapper object for health evaluation. - :param health_evaluation: Represents a health evaluation which describes - the data and the algorithm used by health manager to evaluate the health - of an entity. + :param health_evaluation: Represents a health evaluation which describes the data and the + algorithm used by health manager to evaluate the health of an entity. :type health_evaluation: ~azure.servicefabric.models.HealthEvaluation """ @@ -10576,86 +12722,81 @@ class HealthEvaluationWrapper(Model): 'health_evaluation': {'key': 'HealthEvaluation', 'type': 'HealthEvaluation'}, } - def __init__(self, *, health_evaluation=None, **kwargs) -> None: + def __init__( + self, + *, + health_evaluation: Optional["HealthEvaluation"] = None, + **kwargs + ): super(HealthEvaluationWrapper, self).__init__(**kwargs) self.health_evaluation = health_evaluation -class HealthInformation(Model): - """Represents common health report information. It is included in all health - reports sent to health store and in all health events returned by health - queries. +class HealthInformation(msrest.serialization.Model): + """Represents common health report information. It is included in all health reports sent to health store and in all health events returned by health queries. All required parameters must be populated in order to send to Azure. - :param source_id: Required. The source name that identifies the - client/watchdog/system component that generated the health information. + :param source_id: Required. 
The source name that identifies the client/watchdog/system + component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An - entity can have health reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter - flexibility to categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the - state of the available disk on a node, + :param property: Required. The property of the health information. An entity can have health + reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available + disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a - property "Connectivity" on the same node. - In the health store, these reports are treated as separate health events - for the specified node. - Together with the SourceId, the property uniquely identifies the health - information. + The same reporter can monitor the node connectivity, so it can report a property + "Connectivity" on the same node. + In the health store, these reports are treated as separate health events for the specified + node. + + Together with the SourceId, the property uniquely identifies the health information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity - such as Cluster, Node, Application, Service, Partition, Replica etc. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: Required. The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health - report is valid. This field uses ISO8601 format for specifying the - duration. - When clients report periodically, they should send reports with higher - frequency than time to live. - If clients report on transition, they can set the time to live to - infinite. - When time to live expires, the health event that contains the health - information - is either removed from health store, if RemoveWhenExpired is true, or - evaluated at error, if RemoveWhenExpired false. + :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This + field uses ISO8601 format for specifying the duration. + When clients report periodically, they should send reports with higher frequency than time to + live. + If clients report on transition, they can set the time to live to infinite. + When time to live expires, the health event that contains the health information + is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if + RemoveWhenExpired false. + If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: timedelta - :param description: The description of the health information. It - represents free text used to add human readable information about the - report. + :type time_to_live_in_milli_seconds: ~datetime.timedelta + :param description: The description of the health information. It represents free text used to + add human readable information about the report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker - "[Truncated]", and total string size is 4096 characters. 
+ When truncated, the last characters of the description contain a marker "[Truncated]", and + total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters - from the original string. + Note that when truncated, the description has less than 4096 characters from the original + string. :type description: str - :param sequence_number: The sequence number for this health report as a - numeric string. - The report sequence number is used by the health store to detect stale - reports. - If not specified, a sequence number is auto-generated by the health client - when a report is added. + :param sequence_number: The sequence number for this health report as a numeric string. + The report sequence number is used by the health store to detect stale reports. + If not specified, a sequence number is auto-generated by the health client when a report is + added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is - removed from health store when it expires. - If set to true, the report is removed from the health store after it - expires. - If set to false, the report is treated as an error when expired. The value - of this property is false by default. - When clients report periodically, they should set RemoveWhenExpired false - (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the - entity is evaluated at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is removed from health + store when it expires. + If set to true, the report is removed from the health store after it expires. + If set to false, the report is treated as an error when expired. The value of this property is + false by default. + When clients report periodically, they should set RemoveWhenExpired false (default). 
+ This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated + at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health - report and can be used to find more detailed information about a specific - health event at - aka.ms/sfhealthid + :param health_report_id: A health report ID which identifies the health report and can be used + to find more detailed information about a specific health event at + aka.ms/sfhealthid. :type health_report_id: str """ @@ -10676,7 +12817,19 @@ class HealthInformation(Model): 'health_report_id': {'key': 'HealthReportId', 'type': 'str'}, } - def __init__(self, *, source_id: str, property: str, health_state, time_to_live_in_milli_seconds=None, description: str=None, sequence_number: str=None, remove_when_expired: bool=None, health_report_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + source_id: str, + property: str, + health_state: Union[str, "HealthState"], + time_to_live_in_milli_seconds: Optional[datetime.timedelta] = None, + description: Optional[str] = None, + sequence_number: Optional[str] = None, + remove_when_expired: Optional[bool] = None, + health_report_id: Optional[str] = None, + **kwargs + ): super(HealthInformation, self).__init__(**kwargs) self.source_id = source_id self.property = property @@ -10689,121 +12842,108 @@ def __init__(self, *, source_id: str, property: str, health_state, time_to_live_ class HealthEvent(HealthInformation): - """Represents health information reported on a health entity, such as cluster, - application or node, with additional metadata added by the Health Manager. + """Represents health information reported on a health entity, such as cluster, application or node, with additional metadata added by the Health Manager. All required parameters must be populated in order to send to Azure. 
- :param source_id: Required. The source name that identifies the - client/watchdog/system component that generated the health information. + :param source_id: Required. The source name that identifies the client/watchdog/system + component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An - entity can have health reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter - flexibility to categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the - state of the available disk on a node, + :param property: Required. The property of the health information. An entity can have health + reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available + disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a - property "Connectivity" on the same node. - In the health store, these reports are treated as separate health events - for the specified node. - Together with the SourceId, the property uniquely identifies the health - information. + The same reporter can monitor the node connectivity, so it can report a property + "Connectivity" on the same node. + In the health store, these reports are treated as separate health events for the specified + node. + + Together with the SourceId, the property uniquely identifies the health information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity - such as Cluster, Node, Application, Service, Partition, Replica etc. 
- Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: Required. The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health - report is valid. This field uses ISO8601 format for specifying the - duration. - When clients report periodically, they should send reports with higher - frequency than time to live. - If clients report on transition, they can set the time to live to - infinite. - When time to live expires, the health event that contains the health - information - is either removed from health store, if RemoveWhenExpired is true, or - evaluated at error, if RemoveWhenExpired false. + :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This + field uses ISO8601 format for specifying the duration. + When clients report periodically, they should send reports with higher frequency than time to + live. + If clients report on transition, they can set the time to live to infinite. + When time to live expires, the health event that contains the health information + is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if + RemoveWhenExpired false. + If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: timedelta - :param description: The description of the health information. It - represents free text used to add human readable information about the - report. + :type time_to_live_in_milli_seconds: ~datetime.timedelta + :param description: The description of the health information. It represents free text used to + add human readable information about the report. The maximum string length for the description is 4096 characters. 
If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker - "[Truncated]", and total string size is 4096 characters. + When truncated, the last characters of the description contain a marker "[Truncated]", and + total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters - from the original string. + Note that when truncated, the description has less than 4096 characters from the original + string. :type description: str - :param sequence_number: The sequence number for this health report as a - numeric string. - The report sequence number is used by the health store to detect stale - reports. - If not specified, a sequence number is auto-generated by the health client - when a report is added. + :param sequence_number: The sequence number for this health report as a numeric string. + The report sequence number is used by the health store to detect stale reports. + If not specified, a sequence number is auto-generated by the health client when a report is + added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is - removed from health store when it expires. - If set to true, the report is removed from the health store after it - expires. - If set to false, the report is treated as an error when expired. The value - of this property is false by default. - When clients report periodically, they should set RemoveWhenExpired false - (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the - entity is evaluated at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is removed from health + store when it expires. + If set to true, the report is removed from the health store after it expires. 
+ If set to false, the report is treated as an error when expired. The value of this property is + false by default. + When clients report periodically, they should set RemoveWhenExpired false (default). + This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated + at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health - report and can be used to find more detailed information about a specific - health event at - aka.ms/sfhealthid + :param health_report_id: A health report ID which identifies the health report and can be used + to find more detailed information about a specific health event at + aka.ms/sfhealthid. :type health_report_id: str - :param is_expired: Returns true if the health event is expired, otherwise - false. + :param is_expired: Returns true if the health event is expired, otherwise false. :type is_expired: bool - :param source_utc_timestamp: The date and time when the health report was - sent by the source. - :type source_utc_timestamp: datetime - :param last_modified_utc_timestamp: The date and time when the health - report was last modified by the health store. - :type last_modified_utc_timestamp: datetime - :param last_ok_transition_at: If the current health state is 'Ok', this - property returns the time at which the health report was first reported - with 'Ok'. - For periodic reporting, many reports with the same state may have been - generated. - This property returns the date and time when the first 'Ok' health report - was received. - If the current health state is 'Error' or 'Warning', returns the date and - time at which the health state was last in 'Ok', before transitioning to a - different state. + :param source_utc_timestamp: The date and time when the health report was sent by the source. 
+ :type source_utc_timestamp: ~datetime.datetime + :param last_modified_utc_timestamp: The date and time when the health report was last modified + by the health store. + :type last_modified_utc_timestamp: ~datetime.datetime + :param last_ok_transition_at: If the current health state is 'Ok', this property returns the + time at which the health report was first reported with 'Ok'. + For periodic reporting, many reports with the same state may have been generated. + This property returns the date and time when the first 'Ok' health report was received. + + If the current health state is 'Error' or 'Warning', returns the date and time at which the + health state was last in 'Ok', before transitioning to a different state. + If the health state was never 'Ok', the value will be zero date-time. - :type last_ok_transition_at: datetime - :param last_warning_transition_at: If the current health state is - 'Warning', this property returns the time at which the health report was - first reported with 'Warning'. For periodic reporting, many reports with - the same state may have been generated however, this property returns only - the date and time at the first 'Warning' health report was received. - If the current health state is 'Ok' or 'Error', returns the date and time - at which the health state was last in 'Warning', before transitioning to a - different state. + :type last_ok_transition_at: ~datetime.datetime + :param last_warning_transition_at: If the current health state is 'Warning', this property + returns the time at which the health report was first reported with 'Warning'. For periodic + reporting, many reports with the same state may have been generated however, this property + returns only the date and time at the first 'Warning' health report was received. + + If the current health state is 'Ok' or 'Error', returns the date and time at which the health + state was last in 'Warning', before transitioning to a different state. 
+ If the health state was never 'Warning', the value will be zero date-time. - :type last_warning_transition_at: datetime - :param last_error_transition_at: If the current health state is 'Error', - this property returns the time at which the health report was first - reported with 'Error'. For periodic reporting, many reports with the same - state may have been generated however, this property returns only the date - and time at the first 'Error' health report was received. - If the current health state is 'Ok' or 'Warning', returns the date and - time at which the health state was last in 'Error', before transitioning - to a different state. + :type last_warning_transition_at: ~datetime.datetime + :param last_error_transition_at: If the current health state is 'Error', this property returns + the time at which the health report was first reported with 'Error'. For periodic reporting, + many reports with the same state may have been generated however, this property returns only + the date and time at the first 'Error' health report was received. + + If the current health state is 'Ok' or 'Warning', returns the date and time at which the + health state was last in 'Error', before transitioning to a different state. + If the health state was never 'Error', the value will be zero date-time. 
- :type last_error_transition_at: datetime + :type last_error_transition_at: ~datetime.datetime """ _validation = { @@ -10829,7 +12969,25 @@ class HealthEvent(HealthInformation): 'last_error_transition_at': {'key': 'LastErrorTransitionAt', 'type': 'iso-8601'}, } - def __init__(self, *, source_id: str, property: str, health_state, time_to_live_in_milli_seconds=None, description: str=None, sequence_number: str=None, remove_when_expired: bool=None, health_report_id: str=None, is_expired: bool=None, source_utc_timestamp=None, last_modified_utc_timestamp=None, last_ok_transition_at=None, last_warning_transition_at=None, last_error_transition_at=None, **kwargs) -> None: + def __init__( + self, + *, + source_id: str, + property: str, + health_state: Union[str, "HealthState"], + time_to_live_in_milli_seconds: Optional[datetime.timedelta] = None, + description: Optional[str] = None, + sequence_number: Optional[str] = None, + remove_when_expired: Optional[bool] = None, + health_report_id: Optional[str] = None, + is_expired: Optional[bool] = None, + source_utc_timestamp: Optional[datetime.datetime] = None, + last_modified_utc_timestamp: Optional[datetime.datetime] = None, + last_ok_transition_at: Optional[datetime.datetime] = None, + last_warning_transition_at: Optional[datetime.datetime] = None, + last_error_transition_at: Optional[datetime.datetime] = None, + **kwargs + ): super(HealthEvent, self).__init__(source_id=source_id, property=property, health_state=health_state, time_to_live_in_milli_seconds=time_to_live_in_milli_seconds, description=description, sequence_number=sequence_number, remove_when_expired=remove_when_expired, health_report_id=health_report_id, **kwargs) self.is_expired = is_expired self.source_utc_timestamp = source_utc_timestamp @@ -10839,18 +12997,14 @@ def __init__(self, *, source_id: str, property: str, health_state, time_to_live_ self.last_error_transition_at = last_error_transition_at -class HealthStateCount(Model): - """Represents information 
about how many health entities are in Ok, Warning - and Error health state. +class HealthStateCount(msrest.serialization.Model): + """Represents information about how many health entities are in Ok, Warning and Error health state. - :param ok_count: The number of health entities with aggregated health - state Ok. + :param ok_count: The number of health entities with aggregated health state Ok. :type ok_count: long - :param warning_count: The number of health entities with aggregated health - state Warning. + :param warning_count: The number of health entities with aggregated health state Warning. :type warning_count: long - :param error_count: The number of health entities with aggregated health - state Error. + :param error_count: The number of health entities with aggregated health state Error. :type error_count: long """ @@ -10866,49 +13020,54 @@ class HealthStateCount(Model): 'error_count': {'key': 'ErrorCount', 'type': 'long'}, } - def __init__(self, *, ok_count: int=None, warning_count: int=None, error_count: int=None, **kwargs) -> None: + def __init__( + self, + *, + ok_count: Optional[int] = None, + warning_count: Optional[int] = None, + error_count: Optional[int] = None, + **kwargs + ): super(HealthStateCount, self).__init__(**kwargs) self.ok_count = ok_count self.warning_count = warning_count self.error_count = error_count -class HealthStatistics(Model): - """The health statistics of an entity, returned as part of the health query - result when the query description is configured to include statistics. - The statistics include health state counts for all children types of the - current entity. - For example, for cluster, the health statistics include health state counts - for nodes, applications, services, partitions, replicas, deployed - applications and deployed service packages. - For partition, the health statistics include health counts for replicas. 
+class HealthStatistics(msrest.serialization.Model): + """The health statistics of an entity, returned as part of the health query result when the query description is configured to include statistics. +The statistics include health state counts for all children types of the current entity. +For example, for cluster, the health statistics include health state counts for nodes, applications, services, partitions, replicas, deployed applications and deployed service packages. +For partition, the health statistics include health counts for replicas. - :param health_state_count_list: List of health state counts per entity - kind, which keeps track of how many children of the queried entity are in - Ok, Warning and Error state. - :type health_state_count_list: - list[~azure.servicefabric.models.EntityKindHealthStateCount] + :param health_state_count_list: List of health state counts per entity kind, which keeps track + of how many children of the queried entity are in Ok, Warning and Error state. + :type health_state_count_list: list[~azure.servicefabric.models.EntityKindHealthStateCount] """ _attribute_map = { 'health_state_count_list': {'key': 'HealthStateCountList', 'type': '[EntityKindHealthStateCount]'}, } - def __init__(self, *, health_state_count_list=None, **kwargs) -> None: + def __init__( + self, + *, + health_state_count_list: Optional[List["EntityKindHealthStateCount"]] = None, + **kwargs + ): super(HealthStatistics, self).__init__(**kwargs) self.health_state_count_list = health_state_count_list -class HttpConfig(Model): - """Describes the http configuration for external connectivity for this - network. +class HttpConfig(msrest.serialization.Model): + """Describes the http configuration for external connectivity for this network. All required parameters must be populated in order to send to Azure. :param name: Required. http gateway config name. :type name: str - :param port: Required. 
Specifies the port at which the service endpoint - below needs to be exposed. + :param port: Required. Specifies the port at which the service endpoint below needs to be + exposed. :type port: int :param hosts: Required. description for routing. :type hosts: list[~azure.servicefabric.models.HttpHostConfig] @@ -10926,23 +13085,30 @@ class HttpConfig(Model): 'hosts': {'key': 'hosts', 'type': '[HttpHostConfig]'}, } - def __init__(self, *, name: str, port: int, hosts, **kwargs) -> None: + def __init__( + self, + *, + name: str, + port: int, + hosts: List["HttpHostConfig"], + **kwargs + ): super(HttpConfig, self).__init__(**kwargs) self.name = name self.port = port self.hosts = hosts -class HttpHostConfig(Model): +class HttpHostConfig(msrest.serialization.Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. :param name: Required. http hostname config name. :type name: str - :param routes: Required. Route information to use for routing. Routes are - processed in the order they are specified. Specify routes that are more - specific before routes that can handle general cases. + :param routes: Required. Route information to use for routing. Routes are processed in the + order they are specified. Specify routes that are more specific before routes that can handle + general cases. :type routes: list[~azure.servicefabric.models.HttpRouteConfig] """ @@ -10956,13 +13122,19 @@ class HttpHostConfig(Model): 'routes': {'key': 'routes', 'type': '[HttpRouteConfig]'}, } - def __init__(self, *, name: str, routes, **kwargs) -> None: + def __init__( + self, + *, + name: str, + routes: List["HttpRouteConfig"], + **kwargs + ): super(HttpHostConfig, self).__init__(**kwargs) self.name = name self.routes = routes -class HttpRouteConfig(Model): +class HttpRouteConfig(msrest.serialization.Model): """Describes the hostname properties for http routing. 
All required parameters must be populated in order to send to Azure. @@ -10971,8 +13143,7 @@ class HttpRouteConfig(Model): :type name: str :param match: Required. Describes a rule for http route matching. :type match: ~azure.servicefabric.models.HttpRouteMatchRule - :param destination: Required. Describes destination endpoint for routing - traffic. + :param destination: Required. Describes destination endpoint for routing traffic. :type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -10988,14 +13159,21 @@ class HttpRouteConfig(Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__(self, *, name: str, match, destination, **kwargs) -> None: + def __init__( + self, + *, + name: str, + match: "HttpRouteMatchRule", + destination: "GatewayDestination", + **kwargs + ): super(HttpRouteConfig, self).__init__(**kwargs) self.name = name self.match = match self.destination = destination -class HttpRouteMatchHeader(Model): +class HttpRouteMatchHeader(msrest.serialization.Model): """Describes header information for http route matching. All required parameters must be populated in order to send to Azure. @@ -11004,7 +13182,7 @@ class HttpRouteMatchHeader(Model): :type name: str :param value: Value of header to match in request. :type value: str - :param type: how to match header value. Possible values include: 'exact' + :param type: how to match header value. Possible values include: "exact". 
:type type: str or ~azure.servicefabric.models.HeaderMatchType """ @@ -11018,33 +13196,36 @@ class HttpRouteMatchHeader(Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, *, name: str, value: str=None, type=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + value: Optional[str] = None, + type: Optional[Union[str, "HeaderMatchType"]] = None, + **kwargs + ): super(HttpRouteMatchHeader, self).__init__(**kwargs) self.name = name self.value = value self.type = type -class HttpRouteMatchPath(Model): +class HttpRouteMatchPath(msrest.serialization.Model): """Path to match for routing. - Variables are only populated by the server, and will be ignored when - sending a request. - All required parameters must be populated in order to send to Azure. :param value: Required. Uri path to match for request. :type value: str :param rewrite: replacement string for matched part of the Uri. :type rewrite: str - :ivar type: Required. how to match value in the Uri. Default value: - "prefix" . - :vartype type: str + :param type: Required. how to match value in the Uri. Possible values include: "prefix". + :type type: str or ~azure.servicefabric.models.PathMatchType """ _validation = { 'value': {'required': True}, - 'type': {'required': True, 'constant': True}, + 'type': {'required': True}, } _attribute_map = { @@ -11053,15 +13234,21 @@ class HttpRouteMatchPath(Model): 'type': {'key': 'type', 'type': 'str'}, } - type = "prefix" - - def __init__(self, *, value: str, rewrite: str=None, **kwargs) -> None: + def __init__( + self, + *, + value: str, + type: Union[str, "PathMatchType"], + rewrite: Optional[str] = None, + **kwargs + ): super(HttpRouteMatchPath, self).__init__(**kwargs) self.value = value self.rewrite = rewrite + self.type = type -class HttpRouteMatchRule(Model): +class HttpRouteMatchRule(msrest.serialization.Model): """Describes a rule for http route matching. All required parameters must be populated in order to send to Azure. 
@@ -11081,32 +13268,35 @@ class HttpRouteMatchRule(Model): 'headers': {'key': 'headers', 'type': '[HttpRouteMatchHeader]'}, } - def __init__(self, *, path, headers=None, **kwargs) -> None: + def __init__( + self, + *, + path: "HttpRouteMatchPath", + headers: Optional[List["HttpRouteMatchHeader"]] = None, + **kwargs + ): super(HttpRouteMatchRule, self).__init__(**kwargs) self.path = path self.headers = headers -class IdentityDescription(Model): +class IdentityDescription(msrest.serialization.Model): """Information describing the identities associated with this application. All required parameters must be populated in order to send to Azure. - :param token_service_endpoint: the endpoint for the token service managing - this identity + :param token_service_endpoint: the endpoint for the token service managing this identity. :type token_service_endpoint: str - :param type: Required. the types of identities associated with this - resource; currently restricted to 'SystemAssigned and UserAssigned' + :param type: Required. the types of identities associated with this resource; currently + restricted to 'SystemAssigned and UserAssigned'. :type type: str - :param tenant_id: the identifier of the tenant containing the - application's identity. + :param tenant_id: the identifier of the tenant containing the application's identity. :type tenant_id: str - :param principal_id: the object identifier of the Service Principal of the - identity associated with this resource. + :param principal_id: the object identifier of the Service Principal of the identity associated + with this resource. :type principal_id: str :param user_assigned_identities: represents user assigned identities map. 
- :type user_assigned_identities: dict[str, - ~azure.servicefabric.models.IdentityItemDescription] + :type user_assigned_identities: dict[str, ~azure.servicefabric.models.IdentityItemDescription] """ _validation = { @@ -11121,7 +13311,16 @@ class IdentityDescription(Model): 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{IdentityItemDescription}'}, } - def __init__(self, *, type: str, token_service_endpoint: str=None, tenant_id: str=None, principal_id: str=None, user_assigned_identities=None, **kwargs) -> None: + def __init__( + self, + *, + type: str, + token_service_endpoint: Optional[str] = None, + tenant_id: Optional[str] = None, + principal_id: Optional[str] = None, + user_assigned_identities: Optional[Dict[str, "IdentityItemDescription"]] = None, + **kwargs + ): super(IdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = token_service_endpoint self.type = type @@ -11130,14 +13329,14 @@ def __init__(self, *, type: str, token_service_endpoint: str=None, tenant_id: st self.user_assigned_identities = user_assigned_identities -class IdentityItemDescription(Model): +class IdentityItemDescription(msrest.serialization.Model): """Describes a single user-assigned identity associated with the application. - :param principal_id: the object identifier of the Service Principal which - this identity represents. + :param principal_id: the object identifier of the Service Principal which this identity + represents. :type principal_id: str - :param client_id: the client identifier of the Service Principal which - this identity represents. + :param client_id: the client identifier of the Service Principal which this identity + represents. 
:type client_id: str """ @@ -11146,30 +13345,35 @@ class IdentityItemDescription(Model): 'client_id': {'key': 'clientId', 'type': 'str'}, } - def __init__(self, *, principal_id: str=None, client_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + principal_id: Optional[str] = None, + client_id: Optional[str] = None, + **kwargs + ): super(IdentityItemDescription, self).__init__(**kwargs) self.principal_id = principal_id self.client_id = client_id -class ImageRegistryCredential(Model): +class ImageRegistryCredential(msrest.serialization.Model): """Image registry credential. All required parameters must be populated in order to send to Azure. - :param server: Required. Docker image registry server, without protocol - such as `http` and `https`. + :param server: Required. Docker image registry server, without protocol such as ``http`` and + ``https``. :type server: str :param username: Required. The username for the private registry. :type username: str - :param password_type: The type of the image registry password being given - in password. Possible values include: 'ClearText', 'KeyVaultReference', - 'SecretValueReference'. Default value: "ClearText" . - :type password_type: str or - ~azure.servicefabric.models.ImageRegistryPasswordType - :param password: The password for the private registry. The password is - required for create or update operations, however it is not returned in - the get or list operations. Will be processed based on the type provided. + :param password_type: The type of the image registry password being given in password. Possible + values include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: + "ClearText". + :type password_type: str or ~azure.servicefabric.models.ImageRegistryPasswordType + :param password: The password for the private registry. The password is required for create or + update operations, however it is not returned in the get or list operations. 
Will be processed + based on the type provided. :type password: str """ @@ -11185,7 +13389,15 @@ class ImageRegistryCredential(Model): 'password': {'key': 'password', 'type': 'str'}, } - def __init__(self, *, server: str, username: str, password_type="ClearText", password: str=None, **kwargs) -> None: + def __init__( + self, + *, + server: str, + username: str, + password_type: Optional[Union[str, "ImageRegistryPasswordType"]] = "ClearText", + password: Optional[str] = None, + **kwargs + ): super(ImageRegistryCredential, self).__init__(**kwargs) self.server = server self.username = username @@ -11193,14 +13405,14 @@ def __init__(self, *, server: str, username: str, password_type="ClearText", pas self.password = password -class ImageStoreContent(Model): +class ImageStoreContent(msrest.serialization.Model): """Information about the image store content. - :param store_files: The list of image store file info objects represents - files found under the given image store relative path. + :param store_files: The list of image store file info objects represents files found under the + given image store relative path. :type store_files: list[~azure.servicefabric.models.FileInfo] - :param store_folders: The list of image store folder info objects - represents subfolders found under the given image store relative path. + :param store_folders: The list of image store folder info objects represents subfolders found + under the given image store relative path. 
:type store_folders: list[~azure.servicefabric.models.FolderInfo] """ @@ -11209,31 +13421,35 @@ class ImageStoreContent(Model): 'store_folders': {'key': 'StoreFolders', 'type': '[FolderInfo]'}, } - def __init__(self, *, store_files=None, store_folders=None, **kwargs) -> None: + def __init__( + self, + *, + store_files: Optional[List["FileInfo"]] = None, + store_folders: Optional[List["FolderInfo"]] = None, + **kwargs + ): super(ImageStoreContent, self).__init__(**kwargs) self.store_files = store_files self.store_folders = store_folders -class ImageStoreCopyDescription(Model): - """Information about how to copy image store content from one image store - relative path to another image store relative path. +class ImageStoreCopyDescription(msrest.serialization.Model): + """Information about how to copy image store content from one image store relative path to another image store relative path. All required parameters must be populated in order to send to Azure. - :param remote_source: Required. The relative path of source image store - content to be copied from. + :param remote_source: Required. The relative path of source image store content to be copied + from. :type remote_source: str - :param remote_destination: Required. The relative path of destination - image store content to be copied to. + :param remote_destination: Required. The relative path of destination image store content to be + copied to. :type remote_destination: str :param skip_files: The list of the file names to be skipped for copying. :type skip_files: list[str] - :param check_mark_file: Indicates whether to check mark file during - copying. The property is true if checking mark file is required, false - otherwise. The mark file is used to check whether the folder is well - constructed. If the property is true and mark file does not exist, the - copy is skipped. + :param check_mark_file: Indicates whether to check mark file during copying. 
The property is + true if checking mark file is required, false otherwise. The mark file is used to check whether + the folder is well constructed. If the property is true and mark file does not exist, the copy + is skipped. :type check_mark_file: bool """ @@ -11249,7 +13465,15 @@ class ImageStoreCopyDescription(Model): 'check_mark_file': {'key': 'CheckMarkFile', 'type': 'bool'}, } - def __init__(self, *, remote_source: str, remote_destination: str, skip_files=None, check_mark_file: bool=None, **kwargs) -> None: + def __init__( + self, + *, + remote_source: str, + remote_destination: str, + skip_files: Optional[List[str]] = None, + check_mark_file: Optional[bool] = None, + **kwargs + ): super(ImageStoreCopyDescription, self).__init__(**kwargs) self.remote_source = remote_source self.remote_destination = remote_destination @@ -11257,27 +13481,27 @@ def __init__(self, *, remote_source: str, remote_destination: str, skip_files=No self.check_mark_file = check_mark_file -class ImageStoreInfo(Model): +class ImageStoreInfo(msrest.serialization.Model): """Information about the ImageStore's resource usage. - :param disk_info: disk capacity and available disk space on the node where - the ImageStore primary is placed. + :param disk_info: disk capacity and available disk space on the node where the ImageStore + primary is placed. :type disk_info: ~azure.servicefabric.models.DiskInfo :param used_by_metadata: the ImageStore's file system usage for metadata. :type used_by_metadata: ~azure.servicefabric.models.UsageInfo - :param used_by_staging: The ImageStore's file system usage for staging - files that are being uploaded. + :param used_by_staging: The ImageStore's file system usage for staging files that are being + uploaded. :type used_by_staging: ~azure.servicefabric.models.UsageInfo - :param used_by_copy: the ImageStore's file system usage for copied - application and cluster packages. 
[Removing application and cluster - packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-deleteimagestorecontent) - will free up this space. + :param used_by_copy: the ImageStore's file system usage for copied application and cluster + packages. `Removing application and cluster packages + `_ will + free up this space. :type used_by_copy: ~azure.servicefabric.models.UsageInfo - :param used_by_register: the ImageStore's file system usage for registered - and cluster packages. [Unregistering - application](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) - and [cluster - packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) + :param used_by_register: the ImageStore's file system usage for registered and cluster + packages. `Unregistering application + `_ + and `cluster packages + `_ will free up this space. :type used_by_register: ~azure.servicefabric.models.UsageInfo """ @@ -11290,7 +13514,16 @@ class ImageStoreInfo(Model): 'used_by_register': {'key': 'UsedByRegister', 'type': 'UsageInfo'}, } - def __init__(self, *, disk_info=None, used_by_metadata=None, used_by_staging=None, used_by_copy=None, used_by_register=None, **kwargs) -> None: + def __init__( + self, + *, + disk_info: Optional["DiskInfo"] = None, + used_by_metadata: Optional["UsageInfo"] = None, + used_by_staging: Optional["UsageInfo"] = None, + used_by_copy: Optional["UsageInfo"] = None, + used_by_register: Optional["UsageInfo"] = None, + **kwargs + ): super(ImageStoreInfo, self).__init__(**kwargs) self.disk_info = disk_info self.used_by_metadata = used_by_metadata @@ -11299,17 +13532,17 @@ def __init__(self, *, disk_info=None, used_by_metadata=None, used_by_staging=Non self.used_by_register = used_by_register -class SecretResourcePropertiesBase(Model): - """This type describes the properties of a secret resource, including its - kind. 
+class SecretResourcePropertiesBase(msrest.serialization.Model): + """This type describes the properties of a secret resource, including its kind. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecretResourceProperties + sub-classes are: SecretResourceProperties. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values + include: "inlinedValue", "keyVaultVersionedReference". + :type kind: str or ~azure.servicefabric.models.SecretKind """ _validation = { @@ -11324,35 +13557,36 @@ class SecretResourcePropertiesBase(Model): 'kind': {'SecretResourceProperties': 'SecretResourceProperties'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(SecretResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class SecretResourceProperties(SecretResourcePropertiesBase): """Describes the properties of a secret resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: InlinedValueSecretResourceProperties + sub-classes are: InlinedValueSecretResourceProperties. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values + include: "inlinedValue", "keyVaultVersionedReference". + :type kind: str or ~azure.servicefabric.models.SecretKind :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. 
Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the secret. + :ivar status_details: Gives additional information about the current status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. - The value of this property is opaque to Service Fabric. Once set, the - value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. The value of this + property is opaque to Service Fabric. Once set, the value of this property cannot be changed. :type content_type: str """ @@ -11374,40 +13608,40 @@ class SecretResourceProperties(SecretResourcePropertiesBase): 'kind': {'inlinedValue': 'InlinedValueSecretResourceProperties'} } - def __init__(self, *, description: str=None, content_type: str=None, **kwargs) -> None: + def __init__( + self, + *, + description: Optional[str] = None, + content_type: Optional[str] = None, + **kwargs + ): super(SecretResourceProperties, self).__init__(**kwargs) + self.kind = 'SecretResourceProperties' # type: str self.description = description self.status = None self.status_details = None self.content_type = content_type - self.kind = 'SecretResourceProperties' class InlinedValueSecretResourceProperties(SecretResourceProperties): - """Describes the properties of a secret resource whose value is provided - explicitly as plaintext. The secret resource may have multiple values, each - being uniquely versioned. The secret value of each version is stored - encrypted, and delivered as plaintext into the context of applications - referencing it. 
+ """Describes the properties of a secret resource whose value is provided explicitly as plaintext. The secret resource may have multiple values, each being uniquely versioned. The secret value of each version is stored encrypted, and delivered as plaintext into the context of applications referencing it. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values + include: "inlinedValue", "keyVaultVersionedReference". + :type kind: str or ~azure.servicefabric.models.SecretKind :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the secret. + :ivar status_details: Gives additional information about the current status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. - The value of this property is opaque to Service Fabric. Once set, the - value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. The value of this + property is opaque to Service Fabric. Once set, the value of this property cannot be changed. 
:type content_type: str """ @@ -11425,20 +13659,50 @@ class InlinedValueSecretResourceProperties(SecretResourceProperties): 'content_type': {'key': 'contentType', 'type': 'str'}, } - def __init__(self, *, description: str=None, content_type: str=None, **kwargs) -> None: + def __init__( + self, + *, + description: Optional[str] = None, + content_type: Optional[str] = None, + **kwargs + ): super(InlinedValueSecretResourceProperties, self).__init__(description=description, content_type=content_type, **kwargs) - self.kind = 'inlinedValue' + self.kind = 'inlinedValue' # type: str -class Int64PropertyValue(PropertyValue): - """Describes a Service Fabric property value of type Int64. - - All required parameters must be populated in order to send to Azure. +class InstanceLifecycleDescription(msrest.serialization.Model): + """Describes how the instance will behave. - :param kind: Required. Constant filled by server. - :type kind: str - :param data: Required. The data of the property value. - :type data: str + :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original + location after upgrade. + :type restore_replica_location_after_upgrade: bool + """ + + _attribute_map = { + 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, + } + + def __init__( + self, + *, + restore_replica_location_after_upgrade: Optional[bool] = None, + **kwargs + ): + super(InstanceLifecycleDescription, self).__init__(**kwargs) + self.restore_replica_location_after_upgrade = restore_replica_location_after_upgrade + + +class Int64PropertyValue(PropertyValue): + """Describes a Service Fabric property value of type Int64. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". 
+ :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param data: Required. The data of the property value. + :type data: str """ _validation = { @@ -11451,30 +13715,34 @@ class Int64PropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, *, data: str, **kwargs) -> None: + def __init__( + self, + *, + data: str, + **kwargs + ): super(Int64PropertyValue, self).__init__(**kwargs) + self.kind = 'Int64' # type: str self.data = data - self.kind = 'Int64' -class PartitionInformation(Model): - """Information about the partition identity, partitioning scheme and keys - supported by it. +class PartitionInformation(msrest.serialization.Model): + """Information about the partition identity, partitioning scheme and keys supported by it. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, - SingletonPartitionInformation + sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, SingletonPartitionInformation. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". + :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. 
The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str """ _validation = { @@ -11482,38 +13750,42 @@ class PartitionInformation(Model): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, } _subtype_map = { 'service_partition_kind': {'Int64Range': 'Int64RangePartitionInformation', 'Named': 'NamedPartitionInformation', 'Singleton': 'SingletonPartitionInformation'} } - def __init__(self, *, id: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + **kwargs + ): super(PartitionInformation, self).__init__(**kwargs) + self.service_partition_kind = None # type: Optional[str] self.id = id - self.service_partition_kind = None class Int64RangePartitionInformation(PartitionInformation): - """Describes the partition information for the integer range that is based on - partition schemes. + """Describes the partition information for the integer range that is based on partition schemes. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". 
+ :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str :param low_key: Specifies the minimum key value handled by this partition. :type low_key: str - :param high_key: Specifies the maximum key value handled by this - partition. + :param high_key: Specifies the maximum key value handled by this partition. :type high_key: str """ @@ -11522,28 +13794,34 @@ class Int64RangePartitionInformation(PartitionInformation): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, 'low_key': {'key': 'LowKey', 'type': 'str'}, 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__(self, *, id: str=None, low_key: str=None, high_key: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + low_key: Optional[str] = None, + high_key: Optional[str] = None, + **kwargs + ): super(Int64RangePartitionInformation, self).__init__(id=id, **kwargs) + self.service_partition_kind = 'Int64Range' # type: str self.low_key = low_key self.high_key = high_key - self.service_partition_kind = 'Int64Range' -class InvokeDataLossResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class InvokeDataLossResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. 
If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the - partition that the user-induced operation acted upon. + :param selected_partition: This class returns information about the partition that the + user-induced operation acted upon. :type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -11552,21 +13830,26 @@ class InvokeDataLossResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: + def __init__( + self, + *, + error_code: Optional[int] = None, + selected_partition: Optional["SelectedPartition"] = None, + **kwargs + ): super(InvokeDataLossResult, self).__init__(**kwargs) self.error_code = error_code self.selected_partition = selected_partition -class InvokeQuorumLossResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class InvokeQuorumLossResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the - partition that the user-induced operation acted upon. + :param selected_partition: This class returns information about the partition that the + user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -11575,22 +13858,29 @@ class InvokeQuorumLossResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: + def __init__( + self, + *, + error_code: Optional[int] = None, + selected_partition: Optional["SelectedPartition"] = None, + **kwargs + ): super(InvokeQuorumLossResult, self).__init__(**kwargs) self.error_code = error_code self.selected_partition = selected_partition -class ReplicaStatusBase(Model): +class ReplicaStatusBase(msrest.serialization.Model): """Information about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: KeyValueStoreReplicaStatus + sub-classes are: KeyValueStoreReplicaStatus. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Invalid", "KeyValueStore". + :type kind: str or ~azure.servicefabric.models.ReplicaKind """ _validation = { @@ -11605,9 +13895,12 @@ class ReplicaStatusBase(Model): 'kind': {'KeyValueStore': 'KeyValueStoreReplicaStatus'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ReplicaStatusBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class KeyValueStoreReplicaStatus(ReplicaStatusBase): @@ -11615,24 +13908,22 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param database_row_count_estimate: Value indicating the estimated number - of rows in the underlying database. + :param kind: Required. 
The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Invalid", "KeyValueStore". + :type kind: str or ~azure.servicefabric.models.ReplicaKind + :param database_row_count_estimate: Value indicating the estimated number of rows in the + underlying database. :type database_row_count_estimate: str - :param database_logical_size_estimate: Value indicating the estimated size - of the underlying database. + :param database_logical_size_estimate: Value indicating the estimated size of the underlying + database. :type database_logical_size_estimate: str - :param copy_notification_current_key_filter: Value indicating the latest - key-prefix filter applied to enumeration during the callback. Null if - there is no pending callback. + :param copy_notification_current_key_filter: Value indicating the latest key-prefix filter + applied to enumeration during the callback. Null if there is no pending callback. :type copy_notification_current_key_filter: str - :param copy_notification_current_progress: Value indicating the latest - number of keys enumerated during the callback. 0 if there is no pending - callback. + :param copy_notification_current_progress: Value indicating the latest number of keys + enumerated during the callback. 0 if there is no pending callback. :type copy_notification_current_progress: str - :param status_details: Value indicating the current status details of the - replica. + :param status_details: Value indicating the current status details of the replica. 
:type status_details: str """ @@ -11649,95 +13940,219 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__(self, *, database_row_count_estimate: str=None, database_logical_size_estimate: str=None, copy_notification_current_key_filter: str=None, copy_notification_current_progress: str=None, status_details: str=None, **kwargs) -> None: + def __init__( + self, + *, + database_row_count_estimate: Optional[str] = None, + database_logical_size_estimate: Optional[str] = None, + copy_notification_current_key_filter: Optional[str] = None, + copy_notification_current_progress: Optional[str] = None, + status_details: Optional[str] = None, + **kwargs + ): super(KeyValueStoreReplicaStatus, self).__init__(**kwargs) + self.kind = 'KeyValueStore' # type: str self.database_row_count_estimate = database_row_count_estimate self.database_logical_size_estimate = database_logical_size_estimate self.copy_notification_current_key_filter = copy_notification_current_key_filter self.copy_notification_current_progress = copy_notification_current_progress self.status_details = status_details - self.kind = 'KeyValueStore' -class LoadMetricInformation(Model): - """Represents data structure that contains load information for a certain - metric in a cluster. +class LoadedPartitionInformationQueryDescription(msrest.serialization.Model): + """Represents data structure that contains query information. - :param name: Name of the metric for which this load information is - provided. + :param metric_name: Name of the metric for which this information is provided. + :type metric_name: str + :param service_name: Name of the service this partition belongs to. + :type service_name: str + :param ordering: Ordering of partitions' load. Possible values include: "Desc", "Asc". + :type ordering: str or ~azure.servicefabric.models.Ordering + :param max_results: The maximum number of results to be returned as part of the paged queries. 
+ This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. + :type continuation_token: str + """ + + _attribute_map = { + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'ordering': {'key': 'Ordering', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'long'}, + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + metric_name: Optional[str] = None, + service_name: Optional[str] = None, + ordering: Optional[Union[str, "Ordering"]] = None, + max_results: Optional[int] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(LoadedPartitionInformationQueryDescription, self).__init__(**kwargs) + self.metric_name = metric_name + self.service_name = service_name + self.ordering = ordering + self.max_results = max_results + self.continuation_token = continuation_token + + +class LoadedPartitionInformationResult(msrest.serialization.Model): + """Represents partition information. + + All required parameters must be populated in order to send to Azure. + + :param service_name: Required. Name of the service this partition belongs to. 
+ :type service_name: str + :param partition_id: Required. Id of the partition. + :type partition_id: str + :param metric_name: Required. Name of the metric for which this information is provided. + :type metric_name: str + :param load: Required. Load for metric. + :type load: long + """ + + _validation = { + 'service_name': {'required': True}, + 'partition_id': {'required': True}, + 'metric_name': {'required': True}, + 'load': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'load': {'key': 'Load', 'type': 'long'}, + } + + def __init__( + self, + *, + service_name: str, + partition_id: str, + metric_name: str, + load: int, + **kwargs + ): + super(LoadedPartitionInformationResult, self).__init__(**kwargs) + self.service_name = service_name + self.partition_id = partition_id + self.metric_name = metric_name + self.load = load + + +class LoadedPartitionInformationResultList(msrest.serialization.Model): + """Represents data structure that contains top/least loaded partitions for a certain metric. + + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. + :type continuation_token: str + :param items: List of application information. 
+ :type items: list[~azure.servicefabric.models.LoadedPartitionInformationResult] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[LoadedPartitionInformationResult]'}, + } + + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["LoadedPartitionInformationResult"]] = None, + **kwargs + ): + super(LoadedPartitionInformationResultList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items + + +class LoadMetricInformation(msrest.serialization.Model): + """Represents data structure that contains load information for a certain metric in a cluster. + + :param name: Name of the metric for which this load information is provided. :type name: str - :param is_balanced_before: Value that indicates whether the metrics is - balanced or not before resource balancer run + :param is_balanced_before: Value that indicates whether the metrics is balanced or not before + resource balancer run. :type is_balanced_before: bool - :param is_balanced_after: Value that indicates whether the metrics is - balanced or not after resource balancer run. + :param is_balanced_after: Value that indicates whether the metrics is balanced or not after + resource balancer run. :type is_balanced_after: bool - :param deviation_before: The standard average deviation of the metrics - before resource balancer run. + :param deviation_before: The standard average deviation of the metrics before resource balancer + run. :type deviation_before: str - :param deviation_after: The standard average deviation of the metrics - after resource balancer run. + :param deviation_after: The standard average deviation of the metrics after resource balancer + run. :type deviation_after: str :param balancing_threshold: The balancing threshold for a certain metric. 
:type balancing_threshold: str - :param action: The current action being taken with regard to this metric + :param action: The current action being taken with regard to this metric. :type action: str - :param activity_threshold: The Activity Threshold specified for this - metric in the system Cluster Manifest. + :param activity_threshold: The Activity Threshold specified for this metric in the system + Cluster Manifest. :type activity_threshold: str - :param cluster_capacity: The total cluster capacity for a given metric + :param cluster_capacity: The total cluster capacity for a given metric. :type cluster_capacity: str - :param cluster_load: The total cluster load. In future releases of Service - Fabric this parameter will be deprecated in favor of CurrentClusterLoad. + :param cluster_load: The total cluster load. In future releases of Service Fabric this + parameter will be deprecated in favor of CurrentClusterLoad. :type cluster_load: str :param current_cluster_load: The total cluster load. :type current_cluster_load: str - :param cluster_remaining_capacity: The remaining capacity for the metric - in the cluster. In future releases of Service Fabric this parameter will - be deprecated in favor of ClusterCapacityRemaining. + :param cluster_remaining_capacity: The remaining capacity for the metric in the cluster. In + future releases of Service Fabric this parameter will be deprecated in favor of + ClusterCapacityRemaining. :type cluster_remaining_capacity: str - :param cluster_capacity_remaining: The remaining capacity for the metric - in the cluster. + :param cluster_capacity_remaining: The remaining capacity for the metric in the cluster. :type cluster_capacity_remaining: str - :param is_cluster_capacity_violation: Indicates that the metric is - currently over capacity in the cluster. + :param is_cluster_capacity_violation: Indicates that the metric is currently over capacity in + the cluster. 
:type is_cluster_capacity_violation: bool - :param node_buffer_percentage: The reserved percentage of total node - capacity for this metric. + :param node_buffer_percentage: The reserved percentage of total node capacity for this metric. :type node_buffer_percentage: str - :param cluster_buffered_capacity: Remaining capacity in the cluster - excluding the reserved space. In future releases of Service Fabric this - parameter will be deprecated in favor of BufferedClusterCapacityRemaining. + :param cluster_buffered_capacity: Remaining capacity in the cluster excluding the reserved + space. In future releases of Service Fabric this parameter will be deprecated in favor of + BufferedClusterCapacityRemaining. :type cluster_buffered_capacity: str - :param buffered_cluster_capacity_remaining: Remaining capacity in the - cluster excluding the reserved space. + :param buffered_cluster_capacity_remaining: Remaining capacity in the cluster excluding the + reserved space. :type buffered_cluster_capacity_remaining: str - :param cluster_remaining_buffered_capacity: The remaining percentage of - cluster total capacity for this metric. + :param cluster_remaining_buffered_capacity: The remaining percentage of cluster total capacity + for this metric. :type cluster_remaining_buffered_capacity: str - :param min_node_load_value: The minimum load on any node for this metric. - In future releases of Service Fabric this parameter will be deprecated in - favor of MinimumNodeLoad. + :param min_node_load_value: The minimum load on any node for this metric. In future releases of + Service Fabric this parameter will be deprecated in favor of MinimumNodeLoad. :type min_node_load_value: str :param minimum_node_load: The minimum load on any node for this metric. :type minimum_node_load: str - :param min_node_load_node_id: The node id of the node with the minimum - load for this metric. + :param min_node_load_node_id: The node id of the node with the minimum load for this metric. 
:type min_node_load_node_id: ~azure.servicefabric.models.NodeId - :param max_node_load_value: The maximum load on any node for this metric. - In future releases of Service Fabric this parameter will be deprecated in - favor of MaximumNodeLoad. + :param max_node_load_value: The maximum load on any node for this metric. In future releases of + Service Fabric this parameter will be deprecated in favor of MaximumNodeLoad. :type max_node_load_value: str :param maximum_node_load: The maximum load on any node for this metric. :type maximum_node_load: str - :param max_node_load_node_id: The node id of the node with the maximum - load for this metric. + :param max_node_load_node_id: The node id of the node with the maximum load for this metric. :type max_node_load_node_id: ~azure.servicefabric.models.NodeId - :param planned_load_removal: This value represents the load of the - replicas that are planned to be removed in the future within the cluster. - This kind of load is reported for replicas that are currently being moving - to other nodes and for replicas that are currently being dropped but still - use the load on the source node. + :param planned_load_removal: This value represents the load of the replicas that are planned to + be removed in the future within the cluster. + This kind of load is reported for replicas that are currently being moving to other nodes and + for replicas that are currently being dropped but still use the load on the source node. 
:type planned_load_removal: str """ @@ -11769,7 +14184,36 @@ class LoadMetricInformation(Model): 'planned_load_removal': {'key': 'PlannedLoadRemoval', 'type': 'str'}, } - def __init__(self, *, name: str=None, is_balanced_before: bool=None, is_balanced_after: bool=None, deviation_before: str=None, deviation_after: str=None, balancing_threshold: str=None, action: str=None, activity_threshold: str=None, cluster_capacity: str=None, cluster_load: str=None, current_cluster_load: str=None, cluster_remaining_capacity: str=None, cluster_capacity_remaining: str=None, is_cluster_capacity_violation: bool=None, node_buffer_percentage: str=None, cluster_buffered_capacity: str=None, buffered_cluster_capacity_remaining: str=None, cluster_remaining_buffered_capacity: str=None, min_node_load_value: str=None, minimum_node_load: str=None, min_node_load_node_id=None, max_node_load_value: str=None, maximum_node_load: str=None, max_node_load_node_id=None, planned_load_removal: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + is_balanced_before: Optional[bool] = None, + is_balanced_after: Optional[bool] = None, + deviation_before: Optional[str] = None, + deviation_after: Optional[str] = None, + balancing_threshold: Optional[str] = None, + action: Optional[str] = None, + activity_threshold: Optional[str] = None, + cluster_capacity: Optional[str] = None, + cluster_load: Optional[str] = None, + current_cluster_load: Optional[str] = None, + cluster_remaining_capacity: Optional[str] = None, + cluster_capacity_remaining: Optional[str] = None, + is_cluster_capacity_violation: Optional[bool] = None, + node_buffer_percentage: Optional[str] = None, + cluster_buffered_capacity: Optional[str] = None, + buffered_cluster_capacity_remaining: Optional[str] = None, + cluster_remaining_buffered_capacity: Optional[str] = None, + min_node_load_value: Optional[str] = None, + minimum_node_load: Optional[str] = None, + min_node_load_node_id: Optional["NodeId"] = None, + 
max_node_load_value: Optional[str] = None, + maximum_node_load: Optional[str] = None, + max_node_load_node_id: Optional["NodeId"] = None, + planned_load_removal: Optional[str] = None, + **kwargs + ): super(LoadMetricInformation, self).__init__(**kwargs) self.name = name self.is_balanced_before = is_balanced_before @@ -11798,16 +14242,15 @@ def __init__(self, *, name: str=None, is_balanced_before: bool=None, is_balanced self.planned_load_removal = planned_load_removal -class LoadMetricReport(Model): - """Represents the load metric report which contains the time metric was - reported, its name and value. +class LoadMetricReport(msrest.serialization.Model): + """Represents the load metric report which contains the time metric was reported, its name and value. :param last_reported_utc: Gets the UTC time when the load was reported. - :type last_reported_utc: datetime + :type last_reported_utc: ~datetime.datetime :param name: The name of the load metric. :type name: str - :param value: The value of the load metric. In future releases of Service - Fabric this parameter will be deprecated in favor of CurrentValue. + :param value: The value of the load metric. In future releases of Service Fabric this parameter + will be deprecated in favor of CurrentValue. :type value: str :param current_value: The value of the load metric. 
:type current_value: str @@ -11820,7 +14263,15 @@ class LoadMetricReport(Model): 'current_value': {'key': 'CurrentValue', 'type': 'str'}, } - def __init__(self, *, last_reported_utc=None, name: str=None, value: str=None, current_value: str=None, **kwargs) -> None: + def __init__( + self, + *, + last_reported_utc: Optional[datetime.datetime] = None, + name: Optional[str] = None, + value: Optional[str] = None, + current_value: Optional[str] = None, + **kwargs + ): super(LoadMetricReport, self).__init__(**kwargs) self.last_reported_utc = last_reported_utc self.name = name @@ -11828,18 +14279,18 @@ def __init__(self, *, last_reported_utc=None, name: str=None, value: str=None, c self.current_value = current_value -class LoadMetricReportInfo(Model): +class LoadMetricReportInfo(msrest.serialization.Model): """Information about load reported by replica. :param name: The name of the metric. :type name: str - :param value: The value of the load for the metric. In future releases of - Service Fabric this parameter will be deprecated in favor of CurrentValue. + :param value: The value of the load for the metric. In future releases of Service Fabric this + parameter will be deprecated in favor of CurrentValue. :type value: int :param current_value: The double value of the load for the metric. :type current_value: str :param last_reported_utc: The UTC time when the load is reported. 
- :type last_reported_utc: datetime + :type last_reported_utc: ~datetime.datetime """ _attribute_map = { @@ -11849,7 +14300,15 @@ class LoadMetricReportInfo(Model): 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, } - def __init__(self, *, name: str=None, value: int=None, current_value: str=None, last_reported_utc=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + value: Optional[int] = None, + current_value: Optional[str] = None, + last_reported_utc: Optional[datetime.datetime] = None, + **kwargs + ): super(LoadMetricReportInfo, self).__init__(**kwargs) self.name = name self.value = value @@ -11857,17 +14316,17 @@ def __init__(self, *, name: str=None, value: int=None, current_value: str=None, self.last_reported_utc = last_reported_utc -class NetworkResourcePropertiesBase(Model): - """This type describes the properties of a network resource, including its - kind. +class NetworkResourcePropertiesBase(msrest.serialization.Model): + """This type describes the properties of a network resource, including its kind. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NetworkResourceProperties + sub-classes are: NetworkResourceProperties. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of a Service Fabric container network.Constant filled by + server. Possible values include: "Local". 
+ :type kind: str or ~azure.servicefabric.models.NetworkKind """ _validation = { @@ -11882,31 +14341,33 @@ class NetworkResourcePropertiesBase(Model): 'kind': {'NetworkResourceProperties': 'NetworkResourceProperties'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(NetworkResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class NetworkResourceProperties(NetworkResourcePropertiesBase): """Describes properties of a network resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LocalNetworkResourceProperties + sub-classes are: LocalNetworkResourceProperties. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of a Service Fabric container network.Constant filled by + server. Possible values include: "Local". + :type kind: str or ~azure.servicefabric.models.NetworkKind :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the network. + :ivar status_details: Gives additional information about the current status of the network. 
:vartype status_details: str """ @@ -11927,35 +14388,37 @@ class NetworkResourceProperties(NetworkResourcePropertiesBase): 'kind': {'Local': 'LocalNetworkResourceProperties'} } - def __init__(self, *, description: str=None, **kwargs) -> None: + def __init__( + self, + *, + description: Optional[str] = None, + **kwargs + ): super(NetworkResourceProperties, self).__init__(**kwargs) + self.kind = 'NetworkResourceProperties' # type: str self.description = description self.status = None self.status_details = None - self.kind = 'NetworkResourceProperties' class LocalNetworkResourceProperties(NetworkResourceProperties): - """Information about a Service Fabric container network local to a single - Service Fabric cluster. + """Information about a Service Fabric container network local to a single Service Fabric cluster. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The type of a Service Fabric container network.Constant filled by + server. Possible values include: "Local". + :type kind: str or ~azure.servicefabric.models.NetworkKind :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the network. + :ivar status_details: Gives additional information about the current status of the network. 
:vartype status_details: str - :param network_address_prefix: Address space for the local container - network. + :param network_address_prefix: Address space for the local container network. :type network_address_prefix: str """ @@ -11973,13 +14436,19 @@ class LocalNetworkResourceProperties(NetworkResourceProperties): 'network_address_prefix': {'key': 'networkAddressPrefix', 'type': 'str'}, } - def __init__(self, *, description: str=None, network_address_prefix: str=None, **kwargs) -> None: + def __init__( + self, + *, + description: Optional[str] = None, + network_address_prefix: Optional[str] = None, + **kwargs + ): super(LocalNetworkResourceProperties, self).__init__(description=description, **kwargs) + self.kind = 'Local' # type: str self.network_address_prefix = network_address_prefix - self.kind = 'Local' -class ManagedApplicationIdentity(Model): +class ManagedApplicationIdentity(msrest.serialization.Model): """Describes a managed application identity. All required parameters must be populated in order to send to Azure. @@ -11999,20 +14468,25 @@ class ManagedApplicationIdentity(Model): 'principal_id': {'key': 'PrincipalId', 'type': 'str'}, } - def __init__(self, *, name: str, principal_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + principal_id: Optional[str] = None, + **kwargs + ): super(ManagedApplicationIdentity, self).__init__(**kwargs) self.name = name self.principal_id = principal_id -class ManagedApplicationIdentityDescription(Model): +class ManagedApplicationIdentityDescription(msrest.serialization.Model): """Managed application identity description. :param token_service_endpoint: Token service endpoint. :type token_service_endpoint: str :param managed_identities: A list of managed application identity objects. 
- :type managed_identities: - list[~azure.servicefabric.models.ManagedApplicationIdentity] + :type managed_identities: list[~azure.servicefabric.models.ManagedApplicationIdentity] """ _attribute_map = { @@ -12020,13 +14494,71 @@ class ManagedApplicationIdentityDescription(Model): 'managed_identities': {'key': 'ManagedIdentities', 'type': '[ManagedApplicationIdentity]'}, } - def __init__(self, *, token_service_endpoint: str=None, managed_identities=None, **kwargs) -> None: + def __init__( + self, + *, + token_service_endpoint: Optional[str] = None, + managed_identities: Optional[List["ManagedApplicationIdentity"]] = None, + **kwargs + ): super(ManagedApplicationIdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = token_service_endpoint self.managed_identities = managed_identities -class MetricLoadDescription(Model): +class ManagedIdentityAzureBlobBackupStorageDescription(BackupStorageDescription): + """Describes the parameters for Azure blob store (connected using managed identity) used for storing and enumerating backups. + + All required parameters must be populated in order to send to Azure. + + :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant + filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", + "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". + :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind + :param friendly_name: Friendly name for this backup storage. + :type friendly_name: str + :param managed_identity_type: Required. The type of managed identity to be used to connect to + Azure Blob Store via Managed Identity. Possible values include: "Invalid", "VMSS", "Cluster". + :type managed_identity_type: str or ~azure.servicefabric.models.ManagedIdentityType + :param blob_service_uri: Required. The Blob Service Uri to connect to the Azure blob store.. + :type blob_service_uri: str + :param container_name: Required. 
The name of the container in the blob store to store and + enumerate backups from. + :type container_name: str + """ + + _validation = { + 'storage_kind': {'required': True}, + 'managed_identity_type': {'required': True}, + 'blob_service_uri': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'managed_identity_type': {'key': 'ManagedIdentityType', 'type': 'str'}, + 'blob_service_uri': {'key': 'BlobServiceUri', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + } + + def __init__( + self, + *, + managed_identity_type: Union[str, "ManagedIdentityType"], + blob_service_uri: str, + container_name: str, + friendly_name: Optional[str] = None, + **kwargs + ): + super(ManagedIdentityAzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) + self.storage_kind = 'ManagedIdentityAzureBlobStore' # type: str + self.managed_identity_type = managed_identity_type + self.blob_service_uri = blob_service_uri + self.container_name = container_name + + +class MetricLoadDescription(msrest.serialization.Model): """Specifies metric load information. :param metric_name: The name of the reported metric. 
@@ -12043,52 +14575,54 @@ class MetricLoadDescription(Model): 'predicted_load': {'key': 'PredictedLoad', 'type': 'long'}, } - def __init__(self, *, metric_name: str=None, current_load: int=None, predicted_load: int=None, **kwargs) -> None: + def __init__( + self, + *, + metric_name: Optional[str] = None, + current_load: Optional[int] = None, + predicted_load: Optional[int] = None, + **kwargs + ): super(MetricLoadDescription, self).__init__(**kwargs) self.metric_name = metric_name self.current_load = current_load self.predicted_load = predicted_load -class MonitoringPolicyDescription(Model): +class MonitoringPolicyDescription(msrest.serialization.Model): """Describes the parameters for monitoring an upgrade in Monitored mode. - :param failure_action: The compensating action to perform when a Monitored - upgrade encounters monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that - the upgrade will start rolling back automatically. - Manual indicates that the upgrade will switch to UnmonitoredManual upgrade - mode. Possible values include: 'Invalid', 'Rollback', 'Manual' + :param failure_action: The compensating action to perform when a Monitored upgrade encounters + monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will + start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible + values include: "Invalid", "Rollback", "Manual". :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to - wait after completing an upgrade domain before applying health policies. - It is first interpreted as a string representing an ISO 8601 duration. If - that fails, then it is interpreted as a number representing the total - number of milliseconds. 
+ :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing + an upgrade domain before applying health policies. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time - that the application or cluster must remain healthy before the upgrade - proceeds to the next upgrade domain. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time that the application or + cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to - retry health evaluation when the application or cluster is unhealthy - before FailureAction is executed. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health + evaluation when the application or cluster is unhealthy before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. 
:type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall - upgrade has to complete before FailureAction is executed. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, - then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete + before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 + duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each - upgrade domain has to complete before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that - fails, then it is interpreted as a number representing the total number of - milliseconds. + :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to + complete before FailureAction is executed. It is first interpreted as a string representing an + ISO 8601 duration. If that fails, then it is interpreted as a number representing the total + number of milliseconds. 
:type upgrade_domain_timeout_in_milliseconds: str """ @@ -12101,7 +14635,17 @@ class MonitoringPolicyDescription(Model): 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } - def __init__(self, *, failure_action=None, health_check_wait_duration_in_milliseconds: str=None, health_check_stable_duration_in_milliseconds: str=None, health_check_retry_timeout_in_milliseconds: str=None, upgrade_timeout_in_milliseconds: str=None, upgrade_domain_timeout_in_milliseconds: str=None, **kwargs) -> None: + def __init__( + self, + *, + failure_action: Optional[Union[str, "FailureAction"]] = None, + health_check_wait_duration_in_milliseconds: Optional[str] = "0", + health_check_stable_duration_in_milliseconds: Optional[str] = "PT0H2M0S", + health_check_retry_timeout_in_milliseconds: Optional[str] = "PT0H10M0S", + upgrade_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", + upgrade_domain_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", + **kwargs + ): super(MonitoringPolicyDescription, self).__init__(**kwargs) self.failure_action = failure_action self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds @@ -12111,13 +14655,12 @@ def __init__(self, *, failure_action=None, health_check_wait_duration_in_millise self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds -class NameDescription(Model): +class NameDescription(msrest.serialization.Model): """Describes a Service Fabric name. All required parameters must be populated in order to send to Azure. - :param name: Required. The Service Fabric name, including the 'fabric:' - URI scheme. + :param name: Required. The Service Fabric name, including the 'fabric:' URI scheme. 
:type name: str """ @@ -12129,25 +14672,30 @@ class NameDescription(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, *, name: str, **kwargs) -> None: + def __init__( + self, + *, + name: str, + **kwargs + ): super(NameDescription, self).__init__(**kwargs) self.name = name class NamedPartitionInformation(PartitionInformation): - """Describes the partition information for the name as a string that is based - on partition schemes. + """Describes the partition information for the name as a string that is based on partition schemes. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". + :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. :type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str :param name: Name of the partition. 
:type name: str """ @@ -12157,29 +14705,34 @@ class NamedPartitionInformation(PartitionInformation): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + **kwargs + ): super(NamedPartitionInformation, self).__init__(id=id, **kwargs) + self.service_partition_kind = 'Named' # type: str self.name = name - self.service_partition_kind = 'Named' -class PartitionSchemeDescription(Model): +class PartitionSchemeDescription(msrest.serialization.Model): """Describes how the service is partitioned. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NamedPartitionSchemeDescription, - SingletonPartitionSchemeDescription, - UniformInt64RangePartitionSchemeDescription + sub-classes are: NamedPartitionSchemeDescription, SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". 
+ :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme """ _validation = { @@ -12194,9 +14747,12 @@ class PartitionSchemeDescription(Model): 'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(PartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = None + self.partition_scheme = None # type: Optional[str] class NamedPartitionSchemeDescription(PartitionSchemeDescription): @@ -12204,12 +14760,13 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". + :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme :param count: Required. The number of partitions. :type count: int - :param names: Required. Array of size specified by the ‘Count’ parameter, - for the names of the partitions. + :param names: Required. Array of size specified by the ‘Count’ parameter, for the names of the + partitions. 
:type names: list[str] """ @@ -12225,20 +14782,25 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): 'names': {'key': 'Names', 'type': '[str]'}, } - def __init__(self, *, count: int, names, **kwargs) -> None: + def __init__( + self, + *, + count: int, + names: List[str], + **kwargs + ): super(NamedPartitionSchemeDescription, self).__init__(**kwargs) + self.partition_scheme = 'Named' # type: str self.count = count self.names = names - self.partition_scheme = 'Named' -class NetworkRef(Model): +class NetworkRef(msrest.serialization.Model): """Describes a network reference in a service. - :param name: Name of the network + :param name: Name of the network. :type name: str - :param endpoint_refs: A list of endpoints that are exposed on this - network. + :param endpoint_refs: A list of endpoints that are exposed on this network. :type endpoint_refs: list[~azure.servicefabric.models.EndpointRef] """ @@ -12247,13 +14809,19 @@ class NetworkRef(Model): 'endpoint_refs': {'key': 'endpointRefs', 'type': '[EndpointRef]'}, } - def __init__(self, *, name: str=None, endpoint_refs=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + endpoint_refs: Optional[List["EndpointRef"]] = None, + **kwargs + ): super(NetworkRef, self).__init__(**kwargs) self.name = name self.endpoint_refs = endpoint_refs -class NetworkResourceDescription(Model): +class NetworkResourceDescription(msrest.serialization.Model): """This type describes a network resource. All required parameters must be populated in order to send to Azure. 
@@ -12274,7 +14842,13 @@ class NetworkResourceDescription(Model): 'properties': {'key': 'properties', 'type': 'NetworkResourceProperties'}, } - def __init__(self, *, name: str, properties, **kwargs) -> None: + def __init__( + self, + *, + name: str, + properties: "NetworkResourceProperties", + **kwargs + ): super(NetworkResourceDescription, self).__init__(**kwargs) self.name = name self.properties = properties @@ -12285,18 +14859,38 @@ class NodeAbortedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -12318,9 +14912,9 @@ class NodeAbortedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -12333,11 +14927,11 @@ class NodeAbortedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -12349,8 +14943,26 @@ class NodeAbortedEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + node_id: str, + upgrade_domain: str, + fault_domain: str, + ip_address_or_fqdn: str, + hostname: str, + is_seed_node: bool, + node_version: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeAbortedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeAborted' # type: str self.node_instance = node_instance self.node_id = node_id self.upgrade_domain = upgrade_domain @@ 
-12359,7 +14971,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_i self.hostname = hostname self.is_seed_node = is_seed_node self.node_version = node_version - self.kind = 'NodeAborted' class NodeAddedToClusterEvent(NodeEvent): @@ -12367,18 +14978,38 @@ class NodeAddedToClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. 
@@ -12396,9 +15027,9 @@ class NodeAddedToClusterEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -12409,11 +15040,11 @@ class NodeAddedToClusterEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -12423,15 +15054,30 @@ class NodeAddedToClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, node_type: str, fabric_version: str, ip_address_or_fqdn: str, node_capacities: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_id: str, + node_instance: int, + node_type: str, + fabric_version: str, + ip_address_or_fqdn: str, + node_capacities: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeAddedToClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeAddedToCluster' # type: str self.node_id = node_id self.node_instance = node_instance self.node_type = node_type self.fabric_version = fabric_version 
self.ip_address_or_fqdn = ip_address_or_fqdn self.node_capacities = node_capacities - self.kind = 'NodeAddedToCluster' class NodeClosedEvent(NodeEvent): @@ -12439,18 +15085,38 @@ class NodeClosedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", 
"ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -12462,9 +15128,9 @@ class NodeClosedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -12472,23 +15138,35 @@ class NodeClosedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'error': {'key': 'Error', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, 
node_instance: int, error: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_id: str, + node_instance: int, + error: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeClosedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeClosed' # type: str self.node_id = node_id self.node_instance = node_instance self.error = error - self.kind = 'NodeClosed' class NodeDeactivateCompletedEvent(NodeEvent): @@ -12496,18 +15174,38 @@ class NodeDeactivateCompletedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -12517,13 +15215,13 @@ class NodeDeactivateCompletedEvent(NodeEvent): :param batch_ids_with_deactivate_intent: Required. Batch Ids. :type batch_ids_with_deactivate_intent: str :param start_time: Required. Start time. - :type start_time: datetime + :type start_time: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'effective_deactivate_intent': {'required': True}, @@ -12532,11 +15230,11 @@ class NodeDeactivateCompletedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'effective_deactivate_intent': {'key': 'EffectiveDeactivateIntent', 'type': 'str'}, @@ -12544,13 +15242,26 @@ class NodeDeactivateCompletedEvent(NodeEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, effective_deactivate_intent: str, 
batch_ids_with_deactivate_intent: str, start_time, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + effective_deactivate_intent: str, + batch_ids_with_deactivate_intent: str, + start_time: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeDeactivateCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeDeactivateCompleted' # type: str self.node_instance = node_instance self.effective_deactivate_intent = effective_deactivate_intent self.batch_ids_with_deactivate_intent = batch_ids_with_deactivate_intent self.start_time = start_time - self.kind = 'NodeDeactivateCompleted' class NodeDeactivateStartedEvent(NodeEvent): @@ -12558,18 +15269,38 @@ class NodeDeactivateStartedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -12581,9 +15312,9 @@ class NodeDeactivateStartedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'batch_id': {'required': True}, @@ -12591,46 +15322,53 @@ class NodeDeactivateStartedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'batch_id': {'key': 'BatchId', 'type': 'str'}, 'deactivate_intent': {'key': 'DeactivateIntent', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, batch_id: str, deactivate_intent: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + batch_id: str, + deactivate_intent: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): 
super(NodeDeactivateStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeDeactivateStarted' # type: str self.node_instance = node_instance self.batch_id = batch_id self.deactivate_intent = deactivate_intent - self.kind = 'NodeDeactivateStarted' -class NodeDeactivationInfo(Model): - """Information about the node deactivation. This information is valid for a - node that is undergoing deactivation or has already been deactivated. +class NodeDeactivationInfo(msrest.serialization.Model): + """Information about the node deactivation. This information is valid for a node that is undergoing deactivation or has already been deactivated. - :param node_deactivation_intent: The intent or the reason for deactivating - the node. Following are the possible values for it. Possible values - include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' - :type node_deactivation_intent: str or - ~azure.servicefabric.models.NodeDeactivationIntent - :param node_deactivation_status: The status of node deactivation - operation. Following are the possible values. Possible values include: - 'None', 'SafetyCheckInProgress', 'SafetyCheckComplete', 'Completed' - :type node_deactivation_status: str or - ~azure.servicefabric.models.NodeDeactivationStatus - :param node_deactivation_task: List of tasks representing the deactivation - operation on the node. - :type node_deactivation_task: - list[~azure.servicefabric.models.NodeDeactivationTask] - :param pending_safety_checks: List of pending safety checks - :type pending_safety_checks: - list[~azure.servicefabric.models.SafetyCheckWrapper] + :param node_deactivation_intent: The intent or the reason for deactivating the node. Following + are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", + "RemoveData", "RemoveNode". 
+ :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_status: The status of node deactivation operation. Following are the + possible values. Possible values include: "None", "SafetyCheckInProgress", + "SafetyCheckComplete", "Completed". + :type node_deactivation_status: str or ~azure.servicefabric.models.NodeDeactivationStatus + :param node_deactivation_task: List of tasks representing the deactivation operation on the + node. + :type node_deactivation_task: list[~azure.servicefabric.models.NodeDeactivationTask] + :param pending_safety_checks: List of pending safety checks. + :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -12640,7 +15378,15 @@ class NodeDeactivationInfo(Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__(self, *, node_deactivation_intent=None, node_deactivation_status=None, node_deactivation_task=None, pending_safety_checks=None, **kwargs) -> None: + def __init__( + self, + *, + node_deactivation_intent: Optional[Union[str, "NodeDeactivationIntent"]] = None, + node_deactivation_status: Optional[Union[str, "NodeDeactivationStatus"]] = None, + node_deactivation_task: Optional[List["NodeDeactivationTask"]] = None, + pending_safety_checks: Optional[List["SafetyCheckWrapper"]] = None, + **kwargs + ): super(NodeDeactivationInfo, self).__init__(**kwargs) self.node_deactivation_intent = node_deactivation_intent self.node_deactivation_status = node_deactivation_status @@ -12648,18 +15394,16 @@ def __init__(self, *, node_deactivation_intent=None, node_deactivation_status=No self.pending_safety_checks = pending_safety_checks -class NodeDeactivationTask(Model): +class NodeDeactivationTask(msrest.serialization.Model): """The task representing the deactivation operation on the node. 
- :param node_deactivation_task_id: Identity of the task related to - deactivation operation on the node. - :type node_deactivation_task_id: - ~azure.servicefabric.models.NodeDeactivationTaskId - :param node_deactivation_intent: The intent or the reason for deactivating - the node. Following are the possible values for it. Possible values - include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' - :type node_deactivation_intent: str or - ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_task_id: Identity of the task related to deactivation operation on the + node. + :type node_deactivation_task_id: ~azure.servicefabric.models.NodeDeactivationTaskId + :param node_deactivation_intent: The intent or the reason for deactivating the node. Following + are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", + "RemoveData", "RemoveNode". + :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent """ _attribute_map = { @@ -12667,22 +15411,27 @@ class NodeDeactivationTask(Model): 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, } - def __init__(self, *, node_deactivation_task_id=None, node_deactivation_intent=None, **kwargs) -> None: + def __init__( + self, + *, + node_deactivation_task_id: Optional["NodeDeactivationTaskId"] = None, + node_deactivation_intent: Optional[Union[str, "NodeDeactivationIntent"]] = None, + **kwargs + ): super(NodeDeactivationTask, self).__init__(**kwargs) self.node_deactivation_task_id = node_deactivation_task_id self.node_deactivation_intent = node_deactivation_intent -class NodeDeactivationTaskId(Model): +class NodeDeactivationTaskId(msrest.serialization.Model): """Identity of the task related to deactivation operation on the node. :param id: Value of the task id. :type id: str - :param node_deactivation_task_type: The type of the task that performed - the node deactivation. Following are the possible values. 
Possible values - include: 'Invalid', 'Infrastructure', 'Repair', 'Client' - :type node_deactivation_task_type: str or - ~azure.servicefabric.models.NodeDeactivationTaskType + :param node_deactivation_task_type: The type of the task that performed the node deactivation. + Following are the possible values. Possible values include: "Invalid", "Infrastructure", + "Repair", "Client". + :type node_deactivation_task_type: str or ~azure.servicefabric.models.NodeDeactivationTaskType """ _attribute_map = { @@ -12690,7 +15439,13 @@ class NodeDeactivationTaskId(Model): 'node_deactivation_task_type': {'key': 'NodeDeactivationTaskType', 'type': 'str'}, } - def __init__(self, *, id: str=None, node_deactivation_task_type=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + node_deactivation_task_type: Optional[Union[str, "NodeDeactivationTaskType"]] = None, + **kwargs + ): super(NodeDeactivationTaskId, self).__init__(**kwargs) self.id = id self.node_deactivation_task_type = node_deactivation_task_type @@ -12701,75 +15456,103 @@ class NodeDownEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_up_at: Required. Time when Node was last up. - :type last_node_up_at: datetime + :type last_node_up_at: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_up_at': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_up_at': {'key': 'LastNodeUpAt', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, last_node_up_at, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + last_node_up_at: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeDownEvent, 
self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeDown' # type: str self.node_instance = node_instance self.last_node_up_at = last_node_up_at - self.kind = 'NodeDown' class NodeHealth(EntityHealth): """Information about the health of a Service Fabric node. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the node whose health information is described by - this object. + :param name: Name of the node whose health information is described by this object. :type name: str """ @@ -12781,37 +15564,47 @@ class NodeHealth(EntityHealth): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + name: Optional[str] = None, + **kwargs + ): super(NodeHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name class NodeHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a node, containing information about the - data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a node, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: The name of a Service Fabric node. 
:type node_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the node. The types of the - unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the node. The types of the unhealthy evaluations can be EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -12819,18 +15612,26 @@ class NodeHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + node_name: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(NodeHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Node' # type: str self.node_name = node_name self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Node' class NodeHealthReportExpiredEvent(NodeEvent): @@ -12838,18 +15639,38 @@ class NodeHealthReportExpiredEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. 
The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
+ :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -12866,17 +15687,16 @@ class NodeHealthReportExpiredEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -12890,11 +15710,11 @@ class NodeHealthReportExpiredEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -12907,8 +15727,27 @@ class NodeHealthReportExpiredEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, 
has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeHealthReportExpired' # type: str self.node_instance_id = node_instance_id self.source_id = source_id self.property = property @@ -12918,23 +15757,19 @@ def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_i self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'NodeHealthReportExpired' class NodeHealthState(EntityHealthState): - """Represents the health state of a node, which contains the node identifier - and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + """Represents the health state of a node, which contains the node identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param name: The name of a Service Fabric node. :type name: str - :param id: An internal ID used by Service Fabric to uniquely identify a - node. Node Id is deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is + deterministically generated from node name. 
:type id: ~azure.servicefabric.models.NodeId """ @@ -12944,19 +15779,25 @@ class NodeHealthState(EntityHealthState): 'id': {'key': 'Id', 'type': 'NodeId'}, } - def __init__(self, *, aggregated_health_state=None, name: str=None, id=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + name: Optional[str] = None, + id: Optional["NodeId"] = None, + **kwargs + ): super(NodeHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.name = name self.id = id class NodeHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a node, which contains the node name - and its aggregated health state. + """Represents the health state chunk of a node, which contains the node name and its aggregated health state. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. 
:type node_name: str @@ -12967,21 +15808,25 @@ class NodeHealthStateChunk(EntityHealthStateChunk): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__(self, *, health_state=None, node_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + **kwargs + ): super(NodeHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.node_name = node_name class NodeHealthStateChunkList(EntityHealthStateChunkList): - """The list of node health state chunks in the cluster that respect the input - filters in the chunk query. Returned by get cluster health state chunks - query. + """The list of node health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param total_count: Total number of entity health state objects that match - the specified filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match the specified + filters from the cluster health chunk query description. :type total_count: long - :param items: The list of node health state chunks that respect the input - filters in the chunk query. + :param items: The list of node health state chunks that respect the input filters in the chunk + query. 
:type items: list[~azure.servicefabric.models.NodeHealthStateChunk] """ @@ -12990,51 +15835,51 @@ class NodeHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[NodeHealthStateChunk]'}, } - def __init__(self, *, total_count: int=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + total_count: Optional[int] = None, + items: Optional[List["NodeHealthStateChunk"]] = None, + **kwargs + ): super(NodeHealthStateChunkList, self).__init__(total_count=total_count, **kwargs) self.items = items -class NodeHealthStateFilter(Model): - """Defines matching criteria to determine whether a node should be included in - the returned cluster health chunk. - One filter can match zero, one or multiple nodes, depending on its - properties. - Can be specified in the cluster health chunk query description. - - :param node_name_filter: Name of the node that matches the filter. The - filter is applied only to the specified node, if it exists. - If the node doesn't exist, no node is returned in the cluster health chunk - based on this filter. - If the node exists, it is included in the cluster health chunk if the - health state matches the other filter properties. - If not specified, all nodes that match the parent filters (if any) are - taken into consideration and matched against the other filter members, - like health state filter. +class NodeHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a node should be included in the returned cluster health chunk. +One filter can match zero, one or multiple nodes, depending on its properties. +Can be specified in the cluster health chunk query description. + + :param node_name_filter: Name of the node that matches the filter. The filter is applied only + to the specified node, if it exists. + If the node doesn't exist, no node is returned in the cluster health chunk based on this + filter. 
+ If the node exists, it is included in the cluster health chunk if the health state matches the + other filter properties. + If not specified, all nodes that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the nodes. - It allows selecting nodes if they match the desired health states. - The possible values are integer value of one of the following health - states. Only nodes that match the filter are returned. All nodes are used - to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the node name is - specified. If the filter has default value and node name is specified, the - matching node is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches nodes with HealthState - value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the nodes. It allows selecting + nodes if they match the desired health states. + The possible values are integer value of one of the following health states. Only nodes that + match the filter are returned. 
All nodes are used to evaluate the cluster aggregated health + state. + If not specified, default value is None, unless the node name is specified. If the filter has + default value and node name is specified, the matching node is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches nodes with HealthState value of OK (2) and + Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int """ @@ -13043,15 +15888,20 @@ class NodeHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, **kwargs) -> None: + def __init__( + self, + *, + node_name_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + **kwargs + ): super(NodeHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = node_name_filter self.health_state_filter = health_state_filter -class NodeId(Model): - """An internal ID used by Service Fabric to uniquely identify a node. Node Id - is deterministically generated from node name. +class NodeId(msrest.serialization.Model): + """An internal ID used by Service Fabric to uniquely identify a node. Node Id is deterministically generated from node name. :param id: Value of the node Id. This is a 128 bit integer. 
:type id: str @@ -13061,22 +15911,27 @@ class NodeId(Model): 'id': {'key': 'Id', 'type': 'str'}, } - def __init__(self, *, id: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + **kwargs + ): super(NodeId, self).__init__(**kwargs) self.id = id -class NodeImpact(Model): +class NodeImpact(msrest.serialization.Model): """Describes the expected impact of a repair to a particular node. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the impacted node. :type node_name: str - :param impact_level: The level of impact expected. Possible values - include: 'Invalid', 'None', 'Restart', 'RemoveData', 'RemoveNode' + :param impact_level: The level of impact expected. Possible values include: "Invalid", "None", + "Restart", "RemoveData", "RemoveNode". :type impact_level: str or ~azure.servicefabric.models.ImpactLevel """ @@ -13089,71 +15944,73 @@ class NodeImpact(Model): 'impact_level': {'key': 'ImpactLevel', 'type': 'str'}, } - def __init__(self, *, node_name: str, impact_level=None, **kwargs) -> None: + def __init__( + self, + *, + node_name: str, + impact_level: Optional[Union[str, "ImpactLevel"]] = None, + **kwargs + ): super(NodeImpact, self).__init__(**kwargs) self.node_name = node_name self.impact_level = impact_level -class NodeInfo(Model): +class NodeInfo(msrest.serialization.Model): """Information about a node in Service Fabric cluster. :param name: The name of a Service Fabric node. :type name: str - :param ip_address_or_fqdn: The IP address or fully qualified domain name - of the node. + :param ip_address_or_fqdn: The IP address or fully qualified domain name of the node. :type ip_address_or_fqdn: str :param type: The type of the node. 
:type type: str - :param code_version: The version of Service Fabric binaries that the node - is running. + :param code_version: The version of Service Fabric binaries that the node is running. :type code_version: str - :param config_version: The version of Service Fabric cluster manifest that - the node is using. + :param config_version: The version of Service Fabric cluster manifest that the node is using. :type config_version: str - :param node_status: The status of the node. Possible values include: - 'Invalid', 'Up', 'Down', 'Enabling', 'Disabling', 'Disabled', 'Unknown', - 'Removed' + :param node_status: The status of the node. Possible values include: "Invalid", "Up", "Down", + "Enabling", "Disabling", "Disabled", "Unknown", "Removed". :type node_status: str or ~azure.servicefabric.models.NodeStatus - :param node_up_time_in_seconds: Time in seconds since the node has been in - NodeStatus Up. Value zero indicates that the node is not Up. + :param node_up_time_in_seconds: Time in seconds since the node has been in NodeStatus Up. Value + zero indicates that the node is not Up. :type node_up_time_in_seconds: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param is_seed_node: Indicates if the node is a seed node or not. Returns - true if the node is a seed node, otherwise false. A quorum of seed nodes - are required for proper operation of Service Fabric cluster. + :param is_seed_node: Indicates if the node is a seed node or not. Returns true if the node is a + seed node, otherwise false. 
A quorum of seed nodes are required for proper operation of Service + Fabric cluster. :type is_seed_node: bool :param upgrade_domain: The upgrade domain of the node. :type upgrade_domain: str :param fault_domain: The fault domain of the node. :type fault_domain: str - :param id: An internal ID used by Service Fabric to uniquely identify a - node. Node Id is deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is + deterministically generated from node name. :type id: ~azure.servicefabric.models.NodeId - :param instance_id: The ID representing the node instance. While the ID of - the node is deterministically generated from the node name and remains - same across restarts, the InstanceId changes every time node restarts. + :param instance_id: The ID representing the node instance. While the ID of the node is + deterministically generated from the node name and remains same across restarts, the InstanceId + changes every time node restarts. :type instance_id: str - :param node_deactivation_info: Information about the node deactivation. - This information is valid for a node that is undergoing deactivation or - has already been deactivated. - :type node_deactivation_info: - ~azure.servicefabric.models.NodeDeactivationInfo - :param is_stopped: Indicates if the node is stopped by calling stop node - API or not. Returns true if the node is stopped, otherwise false. + :param node_deactivation_info: Information about the node deactivation. This information is + valid for a node that is undergoing deactivation or has already been deactivated. + :type node_deactivation_info: ~azure.servicefabric.models.NodeDeactivationInfo + :param is_stopped: Indicates if the node is stopped by calling stop node API or not. Returns + true if the node is stopped, otherwise false. :type is_stopped: bool - :param node_down_time_in_seconds: Time in seconds since the node has been - in NodeStatus Down. 
Value zero indicates node is not NodeStatus Down. + :param node_down_time_in_seconds: Time in seconds since the node has been in NodeStatus Down. + Value zero indicates node is not NodeStatus Down. :type node_down_time_in_seconds: str - :param node_up_at: Date time in UTC when the node came up. If the node has - never been up then this value will be zero date time. - :type node_up_at: datetime - :param node_down_at: Date time in UTC when the node went down. If node has - never been down then this value will be zero date time. - :type node_down_at: datetime + :param node_up_at: Date time in UTC when the node came up. If the node has never been up then + this value will be zero date time. + :type node_up_at: ~datetime.datetime + :param node_down_at: Date time in UTC when the node went down. If node has never been down then + this value will be zero date time. + :type node_down_at: ~datetime.datetime + :param node_tags: List that contains tags, which will be applied to the nodes. + :type node_tags: list[str] """ _attribute_map = { @@ -13175,9 +16032,33 @@ class NodeInfo(Model): 'node_down_time_in_seconds': {'key': 'NodeDownTimeInSeconds', 'type': 'str'}, 'node_up_at': {'key': 'NodeUpAt', 'type': 'iso-8601'}, 'node_down_at': {'key': 'NodeDownAt', 'type': 'iso-8601'}, - } - - def __init__(self, *, name: str=None, ip_address_or_fqdn: str=None, type: str=None, code_version: str=None, config_version: str=None, node_status=None, node_up_time_in_seconds: str=None, health_state=None, is_seed_node: bool=None, upgrade_domain: str=None, fault_domain: str=None, id=None, instance_id: str=None, node_deactivation_info=None, is_stopped: bool=None, node_down_time_in_seconds: str=None, node_up_at=None, node_down_at=None, **kwargs) -> None: + 'node_tags': {'key': 'NodeTags', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + ip_address_or_fqdn: Optional[str] = None, + type: Optional[str] = None, + code_version: Optional[str] = None, + config_version: 
Optional[str] = None, + node_status: Optional[Union[str, "NodeStatus"]] = None, + node_up_time_in_seconds: Optional[str] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + is_seed_node: Optional[bool] = None, + upgrade_domain: Optional[str] = None, + fault_domain: Optional[str] = None, + id: Optional["NodeId"] = None, + instance_id: Optional[str] = None, + node_deactivation_info: Optional["NodeDeactivationInfo"] = None, + is_stopped: Optional[bool] = None, + node_down_time_in_seconds: Optional[str] = None, + node_up_at: Optional[datetime.datetime] = None, + node_down_at: Optional[datetime.datetime] = None, + node_tags: Optional[List[str]] = None, + **kwargs + ): super(NodeInfo, self).__init__(**kwargs) self.name = name self.ip_address_or_fqdn = ip_address_or_fqdn @@ -13197,19 +16078,17 @@ def __init__(self, *, name: str=None, ip_address_or_fqdn: str=None, type: str=No self.node_down_time_in_seconds = node_down_time_in_seconds self.node_up_at = node_up_at self.node_down_at = node_down_at + self.node_tags = node_tags -class NodeLoadInfo(Model): - """Information about load on a Service Fabric node. It holds a summary of all - metrics and their load on a node. +class NodeLoadInfo(msrest.serialization.Model): + """Information about load on a Service Fabric node. It holds a summary of all metrics and their load on a node. - :param node_name: Name of the node for which the load information is - provided by this object. + :param node_name: Name of the node for which the load information is provided by this object. :type node_name: str - :param node_load_metric_information: List that contains metrics and their - load information on this node. - :type node_load_metric_information: - list[~azure.servicefabric.models.NodeLoadMetricInformation] + :param node_load_metric_information: List that contains metrics and their load information on + this node. 
+ :type node_load_metric_information: list[~azure.servicefabric.models.NodeLoadMetricInformation] """ _attribute_map = { @@ -13217,52 +16096,52 @@ class NodeLoadInfo(Model): 'node_load_metric_information': {'key': 'NodeLoadMetricInformation', 'type': '[NodeLoadMetricInformation]'}, } - def __init__(self, *, node_name: str=None, node_load_metric_information=None, **kwargs) -> None: + def __init__( + self, + *, + node_name: Optional[str] = None, + node_load_metric_information: Optional[List["NodeLoadMetricInformation"]] = None, + **kwargs + ): super(NodeLoadInfo, self).__init__(**kwargs) self.node_name = node_name self.node_load_metric_information = node_load_metric_information -class NodeLoadMetricInformation(Model): - """Represents data structure that contains load information for a certain - metric on a node. +class NodeLoadMetricInformation(msrest.serialization.Model): + """Represents data structure that contains load information for a certain metric on a node. - :param name: Name of the metric for which this load information is - provided. + :param name: Name of the metric for which this load information is provided. :type name: str :param node_capacity: Total capacity on the node for this metric. :type node_capacity: str - :param node_load: Current load on the node for this metric. In future - releases of Service Fabric this parameter will be deprecated in favor of - CurrentNodeLoad. + :param node_load: Current load on the node for this metric. In future releases of Service + Fabric this parameter will be deprecated in favor of CurrentNodeLoad. :type node_load: str - :param node_remaining_capacity: The remaining capacity on the node for - this metric. In future releases of Service Fabric this parameter will be - deprecated in favor of NodeCapacityRemaining. + :param node_remaining_capacity: The remaining capacity on the node for this metric. In future + releases of Service Fabric this parameter will be deprecated in favor of NodeCapacityRemaining. 
:type node_remaining_capacity: str - :param is_capacity_violation: Indicates if there is a capacity violation - for this metric on the node. + :param is_capacity_violation: Indicates if there is a capacity violation for this metric on the + node. :type is_capacity_violation: bool - :param node_buffered_capacity: The value that indicates the reserved - capacity for this metric on the node. + :param node_buffered_capacity: The value that indicates the reserved capacity for this metric + on the node. :type node_buffered_capacity: str - :param node_remaining_buffered_capacity: The remaining reserved capacity - for this metric on the node. In future releases of Service Fabric this - parameter will be deprecated in favor of BufferedNodeCapacityRemaining. + :param node_remaining_buffered_capacity: The remaining reserved capacity for this metric on the + node. In future releases of Service Fabric this parameter will be deprecated in favor of + BufferedNodeCapacityRemaining. :type node_remaining_buffered_capacity: str :param current_node_load: Current load on the node for this metric. :type current_node_load: str - :param node_capacity_remaining: The remaining capacity on the node for the - metric. + :param node_capacity_remaining: The remaining capacity on the node for the metric. :type node_capacity_remaining: str - :param buffered_node_capacity_remaining: The remaining capacity which is - not reserved by NodeBufferPercentage for this metric on the node. + :param buffered_node_capacity_remaining: The remaining capacity which is not reserved by + NodeBufferPercentage for this metric on the node. :type buffered_node_capacity_remaining: str - :param planned_node_load_removal: This value represents the load of the - replicas that are planned to be removed in the future. - This kind of load is reported for replicas that are currently being moving - to other nodes and for replicas that are currently being dropped but still - use the load on the source node. 
+ :param planned_node_load_removal: This value represents the load of the replicas that are + planned to be removed in the future. + This kind of load is reported for replicas that are currently being moving to other nodes and + for replicas that are currently being dropped but still use the load on the source node. :type planned_node_load_removal: str """ @@ -13280,7 +16159,22 @@ class NodeLoadMetricInformation(Model): 'planned_node_load_removal': {'key': 'PlannedNodeLoadRemoval', 'type': 'str'}, } - def __init__(self, *, name: str=None, node_capacity: str=None, node_load: str=None, node_remaining_capacity: str=None, is_capacity_violation: bool=None, node_buffered_capacity: str=None, node_remaining_buffered_capacity: str=None, current_node_load: str=None, node_capacity_remaining: str=None, buffered_node_capacity_remaining: str=None, planned_node_load_removal: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + node_capacity: Optional[str] = None, + node_load: Optional[str] = None, + node_remaining_capacity: Optional[str] = None, + is_capacity_violation: Optional[bool] = None, + node_buffered_capacity: Optional[str] = None, + node_remaining_buffered_capacity: Optional[str] = None, + current_node_load: Optional[str] = None, + node_capacity_remaining: Optional[str] = None, + buffered_node_capacity_remaining: Optional[str] = None, + planned_node_load_removal: Optional[str] = None, + **kwargs + ): super(NodeLoadMetricInformation, self).__init__(**kwargs) self.name = name self.node_capacity = node_capacity @@ -13300,18 +16194,38 @@ class NodeNewHealthReportEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -13328,17 +16242,16 @@ class NodeNewHealthReportEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -13352,11 +16265,11 @@ class NodeNewHealthReportEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -13369,8 +16282,27 @@ class NodeNewHealthReportEvent(NodeEvent): 'source_utc_timestamp': {'key': 
'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeNewHealthReport' # type: str self.node_instance_id = node_instance_id self.source_id = source_id self.property = property @@ -13380,7 +16312,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_i self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'NodeNewHealthReport' class NodeOpenFailedEvent(NodeEvent): @@ -13388,18 +16319,38 @@ class NodeOpenFailedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -13423,9 +16374,9 @@ class NodeOpenFailedEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -13439,11 +16390,11 @@ class NodeOpenFailedEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -13456,8 +16407,27 @@ class NodeOpenFailedEvent(NodeEvent): 'error': {'key': 'Error', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, error: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + node_id: str, + 
upgrade_domain: str, + fault_domain: str, + ip_address_or_fqdn: str, + hostname: str, + is_seed_node: bool, + node_version: str, + error: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeOpenFailedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeOpenFailed' # type: str self.node_instance = node_instance self.node_id = node_id self.upgrade_domain = upgrade_domain @@ -13467,7 +16437,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_i self.is_seed_node = is_seed_node self.node_version = node_version self.error = error - self.kind = 'NodeOpenFailed' class NodeOpenSucceededEvent(NodeEvent): @@ -13475,18 +16444,38 @@ class NodeOpenSucceededEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -13508,9 +16497,9 @@ class NodeOpenSucceededEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -13523,11 +16512,11 @@ class NodeOpenSucceededEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -13539,8 +16528,26 @@ class NodeOpenSucceededEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + 
node_id: str, + upgrade_domain: str, + fault_domain: str, + ip_address_or_fqdn: str, + hostname: str, + is_seed_node: bool, + node_version: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeOpenSucceededEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeOpenSucceeded' # type: str self.node_instance = node_instance self.node_id = node_id self.upgrade_domain = upgrade_domain @@ -13549,7 +16556,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_i self.hostname = hostname self.is_seed_node = is_seed_node self.node_version = node_version - self.kind = 'NodeOpenSucceeded' class NodeRemovedFromClusterEvent(NodeEvent): @@ -13557,18 +16563,38 @@ class NodeRemovedFromClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -13586,9 +16612,9 @@ class NodeRemovedFromClusterEvent(NodeEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -13599,11 +16625,11 @@ class NodeRemovedFromClusterEvent(NodeEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -13613,29 +16639,45 @@ class NodeRemovedFromClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, node_type: str, fabric_version: str, ip_address_or_fqdn: str, node_capacities: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_id: str, + node_instance: int, + node_type: str, + 
fabric_version: str, + ip_address_or_fqdn: str, + node_capacities: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeRemovedFromClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeRemovedFromCluster' # type: str self.node_id = node_id self.node_instance = node_instance self.node_type = node_type self.fabric_version = fabric_version self.ip_address_or_fqdn = ip_address_or_fqdn self.node_capacities = node_capacities - self.kind = 'NodeRemovedFromCluster' -class RepairImpactDescriptionBase(Model): +class RepairImpactDescriptionBase(msrest.serialization.Model): """Describes the expected impact of executing a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairImpactDescription + sub-classes are: NodeRepairImpactDescription. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of repair impact represented by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". 
+ :type kind: str or ~azure.servicefabric.models.RepairImpactKind """ _validation = { @@ -13650,22 +16692,26 @@ class RepairImpactDescriptionBase(Model): 'kind': {'Node': 'NodeRepairImpactDescription'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(RepairImpactDescriptionBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class NodeRepairImpactDescription(RepairImpactDescriptionBase): """Describes the expected impact of a repair on a set of nodes. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param node_impact_list: The list of nodes impacted by a repair action and - their respective expected impact. + :param kind: Required. The kind of repair impact represented by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". + :type kind: str or ~azure.servicefabric.models.RepairImpactKind + :param node_impact_list: The list of nodes impacted by a repair action and their respective + expected impact. 
:type node_impact_list: list[~azure.servicefabric.models.NodeImpact] """ @@ -13678,24 +16724,30 @@ class NodeRepairImpactDescription(RepairImpactDescriptionBase): 'node_impact_list': {'key': 'NodeImpactList', 'type': '[NodeImpact]'}, } - def __init__(self, *, node_impact_list=None, **kwargs) -> None: + def __init__( + self, + *, + node_impact_list: Optional[List["NodeImpact"]] = None, + **kwargs + ): super(NodeRepairImpactDescription, self).__init__(**kwargs) + self.kind = 'Node' # type: str self.node_impact_list = node_impact_list - self.kind = 'Node' -class RepairTargetDescriptionBase(Model): +class RepairTargetDescriptionBase(msrest.serialization.Model): """Describes the entities targeted by a repair action. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairTargetDescription + sub-classes are: NodeRepairTargetDescription. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of repair target described by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". + :type kind: str or ~azure.servicefabric.models.RepairTargetKind """ _validation = { @@ -13710,20 +16762,24 @@ class RepairTargetDescriptionBase(Model): 'kind': {'Node': 'NodeRepairTargetDescription'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(RepairTargetDescriptionBase, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class NodeRepairTargetDescription(RepairTargetDescriptionBase): """Describes the list of nodes targeted by a repair action. 
- This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of repair target described by the current object.Constant + filled by server. Possible values include: "Invalid", "Node". + :type kind: str or ~azure.servicefabric.models.RepairTargetKind :param node_names: The list of nodes targeted by a repair action. :type node_names: list[str] """ @@ -13737,15 +16793,19 @@ class NodeRepairTargetDescription(RepairTargetDescriptionBase): 'node_names': {'key': 'NodeNames', 'type': '[str]'}, } - def __init__(self, *, node_names=None, **kwargs) -> None: + def __init__( + self, + *, + node_names: Optional[List[str]] = None, + **kwargs + ): super(NodeRepairTargetDescription, self).__init__(**kwargs) + self.kind = 'Node' # type: str self.node_names = node_names - self.kind = 'Node' -class NodeResult(Model): - """Contains information about a node that was targeted by a user-induced - operation. +class NodeResult(msrest.serialization.Model): + """Contains information about a node that was targeted by a user-induced operation. :param node_name: The name of a Service Fabric node. 
:type node_name: str @@ -13758,41 +16818,48 @@ class NodeResult(Model): 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, } - def __init__(self, *, node_name: str=None, node_instance_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + node_name: Optional[str] = None, + node_instance_id: Optional[str] = None, + **kwargs + ): super(NodeResult, self).__init__(**kwargs) self.node_name = node_name self.node_instance_id = node_instance_id class NodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for nodes, containing health evaluations for - each unhealthy node that impacted current aggregated health state. Can be - returned when evaluating cluster health and the aggregated health state is - either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of - unhealthy nodes from the ClusterHealthPolicy. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the + ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes found in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -13800,36 +16867,75 @@ class NodesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + max_percent_unhealthy_nodes: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(NodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Nodes' # type: str self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Nodes' -class NodeTransitionProgress(Model): - """Information about an NodeTransition operation. This class contains an - OperationState and a NodeTransitionResult. The NodeTransitionResult is not - valid until OperationState - is Completed or Faulted. +class NodeTagsDescription(msrest.serialization.Model): + """Describes the tags required for placement or running of the service. - :param state: The state of the operation. 
Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + All required parameters must be populated in order to send to Azure. + + :param count: Required. The number of tags. + :type count: int + :param tags: Required. A set of tags. Array of size specified by the ‘Count’ parameter, for the + placement tags of the service. + :type tags: list[str] + """ + + _validation = { + 'count': {'required': True}, + 'tags': {'required': True}, + } + + _attribute_map = { + 'count': {'key': 'Count', 'type': 'int'}, + 'tags': {'key': 'Tags', 'type': '[str]'}, + } + + def __init__( + self, + *, + count: int, + tags: List[str], + **kwargs + ): + super(NodeTagsDescription, self).__init__(**kwargs) + self.count = count + self.tags = tags + + +class NodeTransitionProgress(msrest.serialization.Model): + """Information about an NodeTransition operation. This class contains an OperationState and a NodeTransitionResult. The NodeTransitionResult is not valid until OperationState +is Completed or Faulted. + + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param node_transition_result: Represents information about an operation - in a terminal state (Completed or Faulted). - :type node_transition_result: - ~azure.servicefabric.models.NodeTransitionResult + :param node_transition_result: Represents information about an operation in a terminal state + (Completed or Faulted). 
+ :type node_transition_result: ~azure.servicefabric.models.NodeTransitionResult """ _attribute_map = { @@ -13837,21 +16943,26 @@ class NodeTransitionProgress(Model): 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, } - def __init__(self, *, state=None, node_transition_result=None, **kwargs) -> None: + def __init__( + self, + *, + state: Optional[Union[str, "OperationState"]] = None, + node_transition_result: Optional["NodeTransitionResult"] = None, + **kwargs + ): super(NodeTransitionProgress, self).__init__(**kwargs) self.state = state self.node_transition_result = node_transition_result -class NodeTransitionResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class NodeTransitionResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param node_result: Contains information about a node that was targeted by - a user-induced operation. + :param node_result: Contains information about a node that was targeted by a user-induced + operation. 
:type node_result: ~azure.servicefabric.models.NodeResult """ @@ -13860,76 +16971,219 @@ class NodeTransitionResult(Model): 'node_result': {'key': 'NodeResult', 'type': 'NodeResult'}, } - def __init__(self, *, error_code: int=None, node_result=None, **kwargs) -> None: + def __init__( + self, + *, + error_code: Optional[int] = None, + node_result: Optional["NodeResult"] = None, + **kwargs + ): super(NodeTransitionResult, self).__init__(**kwargs) self.error_code = error_code self.node_result = node_result +class NodeTypeHealthPolicyMapItem(msrest.serialization.Model): + """Defines an item in NodeTypeHealthPolicyMap. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key of the node type health policy map item. This is the name of the + node type. + :type key: str + :param value: Required. The value of the node type health policy map item. + If the percentage is respected but there is at least one unhealthy node in the node type, the + health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes over the total number + of nodes in the node type. + The computation rounds up to tolerate one failure on small numbers of nodes. + The max percent unhealthy nodes allowed for the node type. Must be between zero and 100. + :type value: int + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'int'}, + } + + def __init__( + self, + *, + key: str, + value: int, + **kwargs + ): + super(NodeTypeHealthPolicyMapItem, self).__init__(**kwargs) + self.key = key + self.value = value + + +class NodeTypeNodesHealthEvaluation(HealthEvaluation): + """Represents health evaluation for nodes of a particular node type. The node type nodes evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. 
It contains health evaluations for each unhealthy node of the included node type that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. + :type description: str + :param node_type_name: The node type name as defined in the cluster manifest. + :type node_type_name: str + :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes for the node + type, specified as an entry in NodeTypeHealthPolicyMap. + :type max_percent_unhealthy_nodes: int + :param total_count: Total number of nodes of the node type found in the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. 
Includes all the unhealthy NodeHealthEvaluation of this node type that impacted the + aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'node_type_name': {'key': 'NodeTypeName', 'type': 'str'}, + 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + node_type_name: Optional[str] = None, + max_percent_unhealthy_nodes: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): + super(NodeTypeNodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'NodeTypeNodes' # type: str + self.node_type_name = node_type_name + self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + + class NodeUpEvent(NodeEvent): """Node Up event. All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_down_at: Required. Time when Node was last down. - :type last_node_down_at: datetime + :type last_node_down_at: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_down_at': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_down_at': {'key': 'LastNodeDownAt', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, last_node_down_at, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + node_name: str, + node_instance: int, + last_node_down_at: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(NodeUpEvent, 
self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.kind = 'NodeUp' # type: str self.node_instance = node_instance self.last_node_down_at = last_node_down_at - self.kind = 'NodeUp' -class NodeUpgradeProgressInfo(Model): +class NodeUpgradeProgressInfo(msrest.serialization.Model): """Information about the upgrading node and its status. :param node_name: The name of a Service Fabric node. :type node_name: str - :param upgrade_phase: The state of the upgrading node. Possible values - include: 'Invalid', 'PreUpgradeSafetyCheck', 'Upgrading', - 'PostUpgradeSafetyCheck' + :param upgrade_phase: The state of the upgrading node. Possible values include: "Invalid", + "PreUpgradeSafetyCheck", "Upgrading", "PostUpgradeSafetyCheck". :type upgrade_phase: str or ~azure.servicefabric.models.NodeUpgradePhase - :param pending_safety_checks: List of pending safety checks - :type pending_safety_checks: - list[~azure.servicefabric.models.SafetyCheckWrapper] + :param pending_safety_checks: List of pending safety checks. 
+ :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -13938,27 +17192,31 @@ class NodeUpgradeProgressInfo(Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__(self, *, node_name: str=None, upgrade_phase=None, pending_safety_checks=None, **kwargs) -> None: + def __init__( + self, + *, + node_name: Optional[str] = None, + upgrade_phase: Optional[Union[str, "NodeUpgradePhase"]] = None, + pending_safety_checks: Optional[List["SafetyCheckWrapper"]] = None, + **kwargs + ): super(NodeUpgradeProgressInfo, self).__init__(**kwargs) self.node_name = node_name self.upgrade_phase = upgrade_phase self.pending_safety_checks = pending_safety_checks -class OperationStatus(Model): - """Contains the OperationId, OperationState, and OperationType for - user-induced operations. +class OperationStatus(msrest.serialization.Model): + """Contains the OperationId, OperationState, and OperationType for user-induced operations. - :param operation_id: A GUID that identifies a call to this API. This is - also passed into the corresponding GetProgress API. + :param operation_id: A GUID that identifies a call to this API. This is also passed into the + corresponding GetProgress API. :type operation_id: str - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param type: The type of the operation. Possible values include: - 'Invalid', 'PartitionDataLoss', 'PartitionQuorumLoss', 'PartitionRestart', - 'NodeTransition' + :param type: The type of the operation. 
Possible values include: "Invalid", + "PartitionDataLoss", "PartitionQuorumLoss", "PartitionRestart", "NodeTransition". :type type: str or ~azure.servicefabric.models.OperationType """ @@ -13968,25 +17226,30 @@ class OperationStatus(Model): 'type': {'key': 'Type', 'type': 'str'}, } - def __init__(self, *, operation_id: str=None, state=None, type=None, **kwargs) -> None: + def __init__( + self, + *, + operation_id: Optional[str] = None, + state: Optional[Union[str, "OperationState"]] = None, + type: Optional[Union[str, "OperationType"]] = None, + **kwargs + ): super(OperationStatus, self).__init__(**kwargs) self.operation_id = operation_id self.state = state self.type = type -class PackageSharingPolicyInfo(Model): +class PackageSharingPolicyInfo(msrest.serialization.Model): """Represents a policy for the package sharing. - :param shared_package_name: The name of code, configuration or data - package that should be shared. + :param shared_package_name: The name of code, configuration or data package that should be + shared. :type shared_package_name: str - :param package_sharing_scope: Represents the scope for - PackageSharingPolicy. This is specified during DeployServicePackageToNode - operation. Possible values include: 'None', 'All', 'Code', 'Config', - 'Data' - :type package_sharing_scope: str or - ~azure.servicefabric.models.PackageSharingPolicyScope + :param package_sharing_scope: Represents the scope for PackageSharingPolicy. This is specified + during DeployServicePackageToNode operation. Possible values include: "None", "All", "Code", + "Config", "Data". 
+ :type package_sharing_scope: str or ~azure.servicefabric.models.PackageSharingPolicyScope """ _attribute_map = { @@ -13994,24 +17257,26 @@ class PackageSharingPolicyInfo(Model): 'package_sharing_scope': {'key': 'PackageSharingScope', 'type': 'str'}, } - def __init__(self, *, shared_package_name: str=None, package_sharing_scope=None, **kwargs) -> None: + def __init__( + self, + *, + shared_package_name: Optional[str] = None, + package_sharing_scope: Optional[Union[str, "PackageSharingPolicyScope"]] = None, + **kwargs + ): super(PackageSharingPolicyInfo, self).__init__(**kwargs) self.shared_package_name = shared_package_name self.package_sharing_scope = package_sharing_scope -class PagedApplicationInfoList(Model): - """The list of applications in the cluster. The list is paged when all of the - results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedApplicationInfoList(msrest.serialization.Model): + """The list of applications in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. 
When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of application information. :type items: list[~azure.servicefabric.models.ApplicationInfo] @@ -14022,28 +17287,29 @@ class PagedApplicationInfoList(Model): 'items': {'key': 'Items', 'type': '[ApplicationInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ApplicationInfo"]] = None, + **kwargs + ): super(PagedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedApplicationResourceDescriptionList(Model): - """The list of application resources. The list is paged when all of the - results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedApplicationResourceDescriptionList(msrest.serialization.Model): + """The list of application resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. 
The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: - list[~azure.servicefabric.models.ApplicationResourceDescription] + :type items: list[~azure.servicefabric.models.ApplicationResourceDescription] """ _attribute_map = { @@ -14051,24 +17317,26 @@ class PagedApplicationResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[ApplicationResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ApplicationResourceDescription"]] = None, + **kwargs + ): super(PagedApplicationResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedApplicationTypeInfoList(Model): - """The list of application types that are provisioned or being provisioned in - the cluster. The list is paged when all of the results cannot fit in a - single message. The next set of results can be obtained by executing the - same query with the continuation token provided in this list. +class PagedApplicationTypeInfoList(msrest.serialization.Model): + """The list of application types that are provisioned or being provisioned in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. 
The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of application type information. :type items: list[~azure.servicefabric.models.ApplicationTypeInfo] @@ -14079,24 +17347,26 @@ class PagedApplicationTypeInfoList(Model): 'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ApplicationTypeInfo"]] = None, + **kwargs + ): super(PagedApplicationTypeInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupConfigurationInfoList(Model): - """The list of backup configuration information. The list is paged when all of - the results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedBackupConfigurationInfoList(msrest.serialization.Model): + """The list of backup configuration information. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. 
- :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of backup configuration information. :type items: list[~azure.servicefabric.models.BackupConfigurationInfo] @@ -14107,24 +17377,26 @@ class PagedBackupConfigurationInfoList(Model): 'items': {'key': 'Items', 'type': '[BackupConfigurationInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["BackupConfigurationInfo"]] = None, + **kwargs + ): super(PagedBackupConfigurationInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupEntityList(Model): - """The list of backup entities that are being periodically backed. The list is - paged when all of the results cannot fit in a single message. The next set - of results can be obtained by executing the same query with the - continuation token provided in this list. +class PagedBackupEntityList(msrest.serialization.Model): + """The list of backup entities that are being periodically backed. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of backup entity information. :type items: list[~azure.servicefabric.models.BackupEntity] @@ -14135,23 +17407,26 @@ class PagedBackupEntityList(Model): 'items': {'key': 'Items', 'type': '[BackupEntity]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["BackupEntity"]] = None, + **kwargs + ): super(PagedBackupEntityList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupInfoList(Model): - """The list of backups. The list is paged when all of the results cannot fit - in a single message. The next set of results can be obtained by executing - the same query with the continuation token provided in this list. +class PagedBackupInfoList(msrest.serialization.Model): + """The list of backups. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of backup information. :type items: list[~azure.servicefabric.models.BackupInfo] @@ -14162,24 +17437,26 @@ class PagedBackupInfoList(Model): 'items': {'key': 'Items', 'type': '[BackupInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["BackupInfo"]] = None, + **kwargs + ): super(PagedBackupInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupPolicyDescriptionList(Model): - """The list of backup policies configured in the cluster. The list is paged - when all of the results cannot fit in a single message. The next set of - results can be obtained by executing the same query with the continuation - token provided in this list. +class PagedBackupPolicyDescriptionList(msrest.serialization.Model): + """The list of backup policies configured in the cluster. 
The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: The list of backup policies information. :type items: list[~azure.servicefabric.models.BackupPolicyDescription] @@ -14190,24 +17467,26 @@ class PagedBackupPolicyDescriptionList(Model): 'items': {'key': 'Items', 'type': '[BackupPolicyDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["BackupPolicyDescription"]] = None, + **kwargs + ): super(PagedBackupPolicyDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedComposeDeploymentStatusInfoList(Model): - """The list of compose deployments in the cluster. The list is paged when all - of the results cannot fit in a single message. The next set of results can - be obtained by executing the same query with the continuation token - provided in this list. 
+class PagedComposeDeploymentStatusInfoList(msrest.serialization.Model): + """The list of compose deployments in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of compose deployment status information. :type items: list[~azure.servicefabric.models.ComposeDeploymentStatusInfo] @@ -14218,25 +17497,28 @@ class PagedComposeDeploymentStatusInfoList(Model): 'items': {'key': 'Items', 'type': '[ComposeDeploymentStatusInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ComposeDeploymentStatusInfo"]] = None, + **kwargs + ): super(PagedComposeDeploymentStatusInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedDeployedApplicationInfoList(Model): - """The list of deployed applications in activating, downloading, or active - states on a node. 
- The list is paged when all of the results cannot fit in a single message. - The next set of results can be obtained by executing the same query with - the continuation token provided in this list. +class PagedDeployedApplicationInfoList(msrest.serialization.Model): + """The list of deployed applications in activating, downloading, or active states on a node. +The list is paged when all of the results cannot fit in a single message. +The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of deployed application information. 
:type items: list[~azure.servicefabric.models.DeployedApplicationInfo] @@ -14247,23 +17529,26 @@ class PagedDeployedApplicationInfoList(Model): 'items': {'key': 'Items', 'type': '[DeployedApplicationInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["DeployedApplicationInfo"]] = None, + **kwargs + ): super(PagedDeployedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedGatewayResourceDescriptionList(Model): - """The list of gateway resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedGatewayResourceDescriptionList(msrest.serialization.Model): + """The list of gateway resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. 
:type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.GatewayResourceDescription] @@ -14274,23 +17559,26 @@ class PagedGatewayResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[GatewayResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["GatewayResourceDescription"]] = None, + **kwargs + ): super(PagedGatewayResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedNetworkResourceDescriptionList(Model): - """The list of network resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedNetworkResourceDescriptionList(msrest.serialization.Model): + """The list of network resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. 
If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.NetworkResourceDescription] @@ -14301,23 +17589,26 @@ class PagedNetworkResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[NetworkResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["NetworkResourceDescription"]] = None, + **kwargs + ): super(PagedNetworkResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedNodeInfoList(Model): - """The list of nodes in the cluster. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedNodeInfoList(msrest.serialization.Model): + """The list of nodes in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. 
When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of node information. :type items: list[~azure.servicefabric.models.NodeInfo] @@ -14328,28 +17619,29 @@ class PagedNodeInfoList(Model): 'items': {'key': 'Items', 'type': '[NodeInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["NodeInfo"]] = None, + **kwargs + ): super(PagedNodeInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedPropertyInfoList(Model): - """The paged list of Service Fabric properties under a given name. The list is - paged when all of the results cannot fit in a single message. The next set - of results can be obtained by executing the same query with the - continuation token provided in this list. +class PagedPropertyInfoList(msrest.serialization.Model): + """The paged list of Service Fabric properties under a given name. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. 
The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any property under the given name - has been modified during the enumeration. If there was a modification, - this property value is false. + :param is_consistent: Indicates whether any property under the given name has been modified + during the enumeration. If there was a modification, this property value is false. :type is_consistent: bool :param properties: List of property information. :type properties: list[~azure.servicefabric.models.PropertyInfo] @@ -14361,25 +17653,28 @@ class PagedPropertyInfoList(Model): 'properties': {'key': 'Properties', 'type': '[PropertyInfo]'}, } - def __init__(self, *, continuation_token: str=None, is_consistent: bool=None, properties=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + is_consistent: Optional[bool] = None, + properties: Optional[List["PropertyInfo"]] = None, + **kwargs + ): super(PagedPropertyInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.is_consistent = is_consistent self.properties = properties -class PagedReplicaInfoList(Model): - """The list of replicas in the cluster for a given partition. The list is - paged when all of the results cannot fit in a single message. The next set - of results can be obtained by executing the same query with the - continuation token provided in this list. +class PagedReplicaInfoList(msrest.serialization.Model): + """The list of replicas in the cluster for a given partition. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of replica information. :type items: list[~azure.servicefabric.models.ReplicaInfo] @@ -14390,23 +17685,26 @@ class PagedReplicaInfoList(Model): 'items': {'key': 'Items', 'type': '[ReplicaInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ReplicaInfo"]] = None, + **kwargs + ): super(PagedReplicaInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedSecretResourceDescriptionList(Model): - """The list of secret resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedSecretResourceDescriptionList(msrest.serialization.Model): + """The list of secret resources. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.SecretResourceDescription] @@ -14417,28 +17715,29 @@ class PagedSecretResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[SecretResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["SecretResourceDescription"]] = None, + **kwargs + ): super(PagedSecretResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedSecretValueResourceDescriptionList(Model): - """The list of values of a secret resource, paged if the number of results - exceeds the limits of a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in the previous page. 
+class PagedSecretValueResourceDescriptionList(msrest.serialization.Model): + """The list of values of a secret resource, paged if the number of results exceeds the limits of a single message. The next set of results can be obtained by executing the same query with the continuation token provided in the previous page. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. 
- :type items: - list[~azure.servicefabric.models.SecretValueResourceDescription] + :type items: list[~azure.servicefabric.models.SecretValueResourceDescription] """ _attribute_map = { @@ -14446,24 +17745,26 @@ class PagedSecretValueResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[SecretValueResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["SecretValueResourceDescription"]] = None, + **kwargs + ): super(PagedSecretValueResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServiceInfoList(Model): - """The list of services in the cluster for an application. The list is paged - when all of the results cannot fit in a single message. The next set of - results can be obtained by executing the same query with the continuation - token provided in this list. +class PagedServiceInfoList(msrest.serialization.Model): + """The list of services in the cluster for an application. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. 
When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of service information. :type items: list[~azure.servicefabric.models.ServiceInfo] @@ -14474,24 +17775,26 @@ class PagedServiceInfoList(Model): 'items': {'key': 'Items', 'type': '[ServiceInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ServiceInfo"]] = None, + **kwargs + ): super(PagedServiceInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServicePartitionInfoList(Model): - """The list of partition in the cluster for a service. The list is paged when - all of the results cannot fit in a single message. The next set of results - can be obtained by executing the same query with the continuation token - provided in this list. +class PagedServicePartitionInfoList(msrest.serialization.Model): + """The list of partition in the cluster for a service. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. 
The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of service partition information. :type items: list[~azure.servicefabric.models.ServicePartitionInfo] @@ -14502,24 +17805,26 @@ class PagedServicePartitionInfoList(Model): 'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ServicePartitionInfo"]] = None, + **kwargs + ): super(PagedServicePartitionInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServiceReplicaDescriptionList(Model): - """The list of service resource replicas in the cluster. The list is paged - when all of the results cannot fit in a single message. The next set of - results can be obtained by executing the same query with the continuation - token provided in this list. +class PagedServiceReplicaDescriptionList(msrest.serialization.Model): + """The list of service resource replicas in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. 
If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of service resource replica description. :type items: list[~azure.servicefabric.models.ServiceReplicaDescription] @@ -14530,23 +17835,26 @@ class PagedServiceReplicaDescriptionList(Model): 'items': {'key': 'Items', 'type': '[ServiceReplicaDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ServiceReplicaDescription"]] = None, + **kwargs + ): super(PagedServiceReplicaDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServiceResourceDescriptionList(Model): - """The list of service resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedServiceResourceDescriptionList(msrest.serialization.Model): + """The list of service resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. 
The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.ServiceResourceDescription] @@ -14557,28 +17865,29 @@ class PagedServiceResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[ServiceResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["ServiceResourceDescription"]] = None, + **kwargs + ): super(PagedServiceResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedSubNameInfoList(Model): - """A paged list of Service Fabric names. The list is paged when all of the - results cannot fit in a single message. The next set of results can be - obtained by executing the same query with the continuation token provided - in this list. +class PagedSubNameInfoList(msrest.serialization.Model): + """A paged list of Service Fabric names. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. 
- :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any name under the given name has - been modified during the enumeration. If there was a modification, this - property value is false. + :param is_consistent: Indicates whether any name under the given name has been modified during + the enumeration. If there was a modification, this property value is false. :type is_consistent: bool :param sub_names: List of the child names. :type sub_names: list[str] @@ -14590,25 +17899,28 @@ class PagedSubNameInfoList(Model): 'sub_names': {'key': 'SubNames', 'type': '[str]'}, } - def __init__(self, *, continuation_token: str=None, is_consistent: bool=None, sub_names=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + is_consistent: Optional[bool] = None, + sub_names: Optional[List[str]] = None, + **kwargs + ): super(PagedSubNameInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.is_consistent = is_consistent self.sub_names = sub_names -class PagedUpdatePartitionLoadResultList(Model): - """The list of results of the call UpdatePartitionLoad. 
The list is paged when - all of the results cannot fit in a single message. The next set of results - can be obtained by executing the same query with the continuation token - provided in this list. +class PagedUpdatePartitionLoadResultList(msrest.serialization.Model): + """The list of results of the call UpdatePartitionLoad. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. :type continuation_token: str :param items: List of partition load update information. 
:type items: list[~azure.servicefabric.models.UpdatePartitionLoadResult] @@ -14619,23 +17931,26 @@ class PagedUpdatePartitionLoadResultList(Model): 'items': {'key': 'Items', 'type': '[UpdatePartitionLoadResult]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["UpdatePartitionLoadResult"]] = None, + **kwargs + ): super(PagedUpdatePartitionLoadResultList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedVolumeResourceDescriptionList(Model): - """The list of volume resources. The list is paged when all of the results - cannot fit in a single message. The next set of results can be obtained by - executing the same query with the continuation token provided in this list. +class PagedVolumeResourceDescriptionList(msrest.serialization.Model): + """The list of volume resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to - obtain next set of results. The continuation token is included in the - response of the API when the results from the system do not fit in a - single response. When this value is passed to the next API call, the API - returns next set of results. If there are no further results, then the - continuation token is not included in the response. + :param continuation_token: The continuation token parameter is used to obtain next set of + results. The continuation token is included in the response of the API when the results from + the system do not fit in a single response. When this value is passed to the next API call, the + API returns next set of results. If there are no further results, then the continuation token + is not included in the response. 
:type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.VolumeResourceDescription] @@ -14646,7 +17961,13 @@ class PagedVolumeResourceDescriptionList(Model): 'items': {'key': 'Items', 'type': '[VolumeResourceDescription]'}, } - def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + def __init__( + self, + *, + continuation_token: Optional[str] = None, + items: Optional[List["VolumeResourceDescription"]] = None, + **kwargs + ): super(PagedVolumeResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items @@ -14656,46 +17977,65 @@ class PartitionAnalysisEvent(PartitionEvent): """Represents the base for all Partition Analysis Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionPrimaryMoveAnalysisEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: PartitionPrimaryMoveAnalysisEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. 
:type metadata: ~azure.servicefabric.models.AnalysisEventMetadata """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, } @@ -14704,32 +18044,40 @@ class PartitionAnalysisEvent(PartitionEvent): 'kind': {'PartitionPrimaryMoveAnalysis': 'PartitionPrimaryMoveAnalysisEvent'} } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, metadata, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + metadata: "AnalysisEventMetadata", + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(PartitionAnalysisEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.kind = 'PartitionAnalysisEvent' # type: str self.metadata = metadata - self.kind = 'PartitionAnalysisEvent' class PartitionBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information, for a specific partition, specifying what - backup policy is being applied and suspend description, if any. + """Backup configuration information, for a specific partition, specifying what backup policy is being applied and suspend description, if any. 
All required parameters must be populated in order to send to Azure. - :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. - :type kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. 
:type partition_id: str @@ -14740,19 +18088,28 @@ class PartitionBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, service_name: str=None, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + policy_name: Optional[str] = None, + policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, + suspension_info: Optional["BackupSuspensionInfo"] = None, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + **kwargs + ): super(PartitionBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) + self.kind = 'Partition' # type: str self.service_name = service_name self.partition_id = partition_id - self.kind = 'Partition' class PartitionBackupEntity(BackupEntity): @@ -14760,10 +18117,11 @@ class PartitionBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". 
+ :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. :type partition_id: str @@ -14779,24 +18137,28 @@ class PartitionBackupEntity(BackupEntity): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, service_name: str=None, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + **kwargs + ): super(PartitionBackupEntity, self).__init__(**kwargs) + self.entity_kind = 'Partition' # type: str self.service_name = service_name self.partition_id = partition_id - self.entity_kind = 'Partition' -class PartitionDataLossProgress(Model): +class PartitionDataLossProgress(msrest.serialization.Model): """Information about a partition data loss user-induced operation. - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_data_loss_result: Represents information about an operation - in a terminal state (Completed or Faulted). - :type invoke_data_loss_result: - ~azure.servicefabric.models.InvokeDataLossResult + :param invoke_data_loss_result: Represents information about an operation in a terminal state + (Completed or Faulted). 
+ :type invoke_data_loss_result: ~azure.servicefabric.models.InvokeDataLossResult """ _attribute_map = { @@ -14804,7 +18166,13 @@ class PartitionDataLossProgress(Model): 'invoke_data_loss_result': {'key': 'InvokeDataLossResult', 'type': 'InvokeDataLossResult'}, } - def __init__(self, *, state=None, invoke_data_loss_result=None, **kwargs) -> None: + def __init__( + self, + *, + state: Optional[Union[str, "OperationState"]] = None, + invoke_data_loss_result: Optional["InvokeDataLossResult"] = None, + **kwargs + ): super(PartitionDataLossProgress, self).__init__(**kwargs) self.state = state self.invoke_data_loss_result = invoke_data_loss_result @@ -14813,30 +18181,25 @@ def __init__(self, *, state=None, invoke_data_loss_result=None, **kwargs) -> Non class PartitionHealth(EntityHealth): """Information about the health of a Service Fabric partition. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
:type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param partition_id: ID of the partition whose health information is - described by this object. + :param partition_id: ID of the partition whose health information is described by this object. :type partition_id: str - :param replica_health_states: The list of replica health states associated - with the partition. - :type replica_health_states: - list[~azure.servicefabric.models.ReplicaHealthState] + :param replica_health_states: The list of replica health states associated with the partition. 
+ :type replica_health_states: list[~azure.servicefabric.models.ReplicaHealthState] """ _attribute_map = { @@ -14848,40 +18211,50 @@ class PartitionHealth(EntityHealth): 'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, replica_health_states=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + partition_id: Optional[str] = None, + replica_health_states: Optional[List["ReplicaHealthState"]] = None, + **kwargs + ): super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.partition_id = partition_id self.replica_health_states = replica_health_states class PartitionHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a partition, containing information about - the data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. 
+ """Represents health evaluation for a partition, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition whose health evaluation is - described by this object. + :param partition_id: Id of the partition whose health evaluation is described by this object. :type partition_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the partition. 
The types of the - unhealthy evaluations can be ReplicasHealthEvaluation or - EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the partition. The types of the unhealthy evaluations can be + ReplicasHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -14889,18 +18262,26 @@ class PartitionHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, partition_id: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + partition_id: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(PartitionHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Partition' # type: str self.partition_id = partition_id self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Partition' class PartitionHealthReportExpiredEvent(PartitionEvent): @@ -14908,23 +18289,42 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. 
+ :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -14938,17 +18338,16 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -14961,11 +18360,11 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -14977,8 +18376,26 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(PartitionHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, 
partition_id=partition_id, **kwargs) + self.kind = 'PartitionHealthReportExpired' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -14987,21 +18404,16 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, sou self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'PartitionHealthReportExpired' class PartitionHealthState(EntityHealthState): - """Represents the health state of a partition, which contains the partition - identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: Id of the partition whose health state is described - by this object. + """Represents the health state of a partition, which contains the partition identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param partition_id: Id of the partition whose health state is described by this object. 
:type partition_id: str """ @@ -15010,27 +18422,29 @@ class PartitionHealthState(EntityHealthState): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + partition_id: Optional[str] = None, + **kwargs + ): super(PartitionHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.partition_id = partition_id class PartitionHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a partition, which contains the - partition ID, its aggregated health state and any replicas that respect the - filters in the cluster health chunk query description. + """Represents the health state chunk of a partition, which contains the partition ID, its aggregated health state and any replicas that respect the filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param partition_id: The Id of the partition. :type partition_id: str - :param replica_health_state_chunks: The list of replica health state - chunks belonging to the partition that respect the filters in the cluster - health chunk query description. 
- :type replica_health_state_chunks: - ~azure.servicefabric.models.ReplicaHealthStateChunkList + :param replica_health_state_chunks: The list of replica health state chunks belonging to the + partition that respect the filters in the cluster health chunk query description. + :type replica_health_state_chunks: ~azure.servicefabric.models.ReplicaHealthStateChunkList """ _attribute_map = { @@ -15039,20 +18453,25 @@ class PartitionHealthStateChunk(EntityHealthStateChunk): 'replica_health_state_chunks': {'key': 'ReplicaHealthStateChunks', 'type': 'ReplicaHealthStateChunkList'}, } - def __init__(self, *, health_state=None, partition_id: str=None, replica_health_state_chunks=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + partition_id: Optional[str] = None, + replica_health_state_chunks: Optional["ReplicaHealthStateChunkList"] = None, + **kwargs + ): super(PartitionHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.partition_id = partition_id self.replica_health_state_chunks = replica_health_state_chunks -class PartitionHealthStateChunkList(Model): - """The list of partition health state chunks that respect the input filters in - the chunk query description. - Returned by get cluster health state chunks query as part of the parent - application hierarchy. +class PartitionHealthStateChunkList(msrest.serialization.Model): + """The list of partition health state chunks that respect the input filters in the chunk query description. +Returned by get cluster health state chunks query as part of the parent application hierarchy. - :param items: The list of partition health state chunks that respect the - input filters in the chunk query. + :param items: The list of partition health state chunks that respect the input filters in the + chunk query. 
:type items: list[~azure.servicefabric.models.PartitionHealthStateChunk] """ @@ -15060,68 +18479,60 @@ class PartitionHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[PartitionHealthStateChunk]'}, } - def __init__(self, *, items=None, **kwargs) -> None: + def __init__( + self, + *, + items: Optional[List["PartitionHealthStateChunk"]] = None, + **kwargs + ): super(PartitionHealthStateChunkList, self).__init__(**kwargs) self.items = items -class PartitionHealthStateFilter(Model): - """Defines matching criteria to determine whether a partition should be - included as a child of a service in the cluster health chunk. - The partitions are only returned if the parent entities match a filter - specified in the cluster health chunk query description. The parent service - and application must be included in the cluster health chunk. - One filter can match zero, one or multiple partitions, depending on its - properties. - - :param partition_id_filter: ID of the partition that matches the filter. - The filter is applied only to the specified partition, if it exists. - If the partition doesn't exist, no partition is returned in the cluster - health chunk based on this filter. - If the partition exists, it is included in the cluster health chunk if it - respects the other filter properties. - If not specified, all partitions that match the parent filters (if any) - are taken into consideration and matched against the other filter members, - like health state filter. +class PartitionHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a partition should be included as a child of a service in the cluster health chunk. +The partitions are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent service and application must be included in the cluster health chunk. +One filter can match zero, one or multiple partitions, depending on its properties. 
+ + :param partition_id_filter: ID of the partition that matches the filter. The filter is applied + only to the specified partition, if it exists. + If the partition doesn't exist, no partition is returned in the cluster health chunk based on + this filter. + If the partition exists, it is included in the cluster health chunk if it respects the other + filter properties. + If not specified, all partitions that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type partition_id_filter: str - :param health_state_filter: The filter for the health state of the - partitions. It allows selecting partitions if they match the desired - health states. - The possible values are integer value of one of the following health - states. Only partitions that match the filter are returned. All partitions - are used to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the partition ID is - specified. If the filter has default value and partition ID is specified, - the matching partition is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches partitions with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . 
+ :param health_state_filter: The filter for the health state of the partitions. It allows + selecting partitions if they match the desired health states. + The possible values are integer value of one of the following health states. Only partitions + that match the filter are returned. All partitions are used to evaluate the cluster aggregated + health state. + If not specified, default value is None, unless the partition ID is specified. If the filter + has default value and partition ID is specified, the matching partition is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches partitions with HealthState value of OK + (2) and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param replica_filters: Defines a list of filters that specify which - replicas to be included in the returned cluster health chunk as children - of the parent partition. The replicas are returned only if the parent - partition matches a filter. - If the list is empty, no replicas are returned. All the replicas are used - to evaluate the parent partition aggregated health state, regardless of - the input filters. + :param replica_filters: Defines a list of filters that specify which replicas to be included in + the returned cluster health chunk as children of the parent partition. 
The replicas are + returned only if the parent partition matches a filter. + If the list is empty, no replicas are returned. All the replicas are used to evaluate the + parent partition aggregated health state, regardless of the input filters. The partition filter may specify multiple replica filters. - For example, it can specify a filter to return all replicas with health - state Error and another filter to always include a replica identified by - its replica id. - :type replica_filters: - list[~azure.servicefabric.models.ReplicaHealthStateFilter] + For example, it can specify a filter to return all replicas with health state Error and + another filter to always include a replica identified by its replica id. + :type replica_filters: list[~azure.servicefabric.models.ReplicaHealthStateFilter] """ _attribute_map = { @@ -15130,7 +18541,14 @@ class PartitionHealthStateFilter(Model): 'replica_filters': {'key': 'ReplicaFilters', 'type': '[ReplicaHealthStateFilter]'}, } - def __init__(self, *, partition_id_filter: str=None, health_state_filter: int=0, replica_filters=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + replica_filters: Optional[List["ReplicaHealthStateFilter"]] = None, + **kwargs + ): super(PartitionHealthStateFilter, self).__init__(**kwargs) self.partition_id_filter = partition_id_filter self.health_state_filter = health_state_filter @@ -15138,21 +18556,20 @@ def __init__(self, *, partition_id_filter: str=None, health_state_filter: int=0, class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing instances of - stateless service partition. + """Represents a scaling mechanism for adding or removing instances of stateless service partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. 
- :type kind: str - :param min_instance_count: Required. Minimum number of instances of the - partition. + :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. + Possible values include: "Invalid", "PartitionInstanceCount", + "AddRemoveIncrementalNamedPartition". + :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind + :param min_instance_count: Required. Minimum number of instances of the partition. :type min_instance_count: int - :param max_instance_count: Required. Maximum number of instances of the - partition. + :param max_instance_count: Required. Maximum number of instances of the partition. :type max_instance_count: int - :param scale_increment: Required. The number of instances to add or remove - during a scaling operation. + :param scale_increment: Required. The number of instances to add or remove during a scaling + operation. :type scale_increment: int """ @@ -15170,32 +18587,35 @@ class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__(self, *, min_instance_count: int, max_instance_count: int, scale_increment: int, **kwargs) -> None: + def __init__( + self, + *, + min_instance_count: int, + max_instance_count: int, + scale_increment: int, + **kwargs + ): super(PartitionInstanceCountScaleMechanism, self).__init__(**kwargs) + self.kind = 'PartitionInstanceCount' # type: str self.min_instance_count = min_instance_count self.max_instance_count = max_instance_count self.scale_increment = scale_increment - self.kind = 'PartitionInstanceCount' -class PartitionLoadInformation(Model): - """Represents load information for a partition, which contains the primary and - secondary reported load metrics. - In case there is no load reported, PartitionLoadInformation will contain - the default load for the service of the partition. - For default loads, LoadMetricReport's LastReportedUtc is set to 0. 
+class PartitionLoadInformation(msrest.serialization.Model): + """Represents load information for a partition, which contains the primary and secondary reported load metrics. +In case there is no load reported, PartitionLoadInformation will contain the default load for the service of the partition. +For default loads, LoadMetricReport's LastReportedUtc is set to 0. :param partition_id: Id of the partition. :type partition_id: str - :param primary_load_metric_reports: Array of load reports from the primary - replica for this partition. - :type primary_load_metric_reports: - list[~azure.servicefabric.models.LoadMetricReport] - :param secondary_load_metric_reports: Array of aggregated load reports - from all secondary replicas for this partition. + :param primary_load_metric_reports: Array of load reports from the primary replica for this + partition. + :type primary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] + :param secondary_load_metric_reports: Array of aggregated load reports from all secondary + replicas for this partition. Array only contains the latest reported load for each metric. 
- :type secondary_load_metric_reports: - list[~azure.servicefabric.models.LoadMetricReport] + :type secondary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] """ _attribute_map = { @@ -15204,31 +18624,34 @@ class PartitionLoadInformation(Model): 'secondary_load_metric_reports': {'key': 'SecondaryLoadMetricReports', 'type': '[LoadMetricReport]'}, } - def __init__(self, *, partition_id: str=None, primary_load_metric_reports=None, secondary_load_metric_reports=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + primary_load_metric_reports: Optional[List["LoadMetricReport"]] = None, + secondary_load_metric_reports: Optional[List["LoadMetricReport"]] = None, + **kwargs + ): super(PartitionLoadInformation, self).__init__(**kwargs) self.partition_id = partition_id self.primary_load_metric_reports = primary_load_metric_reports self.secondary_load_metric_reports = secondary_load_metric_reports -class PartitionMetricLoadDescription(Model): - """Represents load information for a partition, which contains the metrics - load information about primary, all secondary replicas/instances or a - specific secondary replica/instance located on a specific node. +class PartitionMetricLoadDescription(msrest.serialization.Model): + """Represents load information for a partition, which contains the metrics load information about primary, all secondary replicas/instances or a specific secondary replica/instance located on a specific node. :param partition_id: Id of the partition. :type partition_id: str - :param primary_replica_load_entries: Partition's load information for - primary replica, in case partition is from a stateful service. - :type primary_replica_load_entries: - list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replicas_or_instances_load_entries: Partition's load - information for all secondary replicas or instances. 
+ :param primary_replica_load_entries: Partition's load information for primary replica, in case + partition is from a stateful service. + :type primary_replica_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] + :param secondary_replicas_or_instances_load_entries: Partition's load information for all + secondary replicas or instances. :type secondary_replicas_or_instances_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replica_or_instance_load_entries_per_node: Partition's - load information for a specific secondary replica or instance located on a - specific node. + :param secondary_replica_or_instance_load_entries_per_node: Partition's load information for a + specific secondary replica or instance located on a specific node. :type secondary_replica_or_instance_load_entries_per_node: list[~azure.servicefabric.models.ReplicaMetricLoadDescription] """ @@ -15240,7 +18663,15 @@ class PartitionMetricLoadDescription(Model): 'secondary_replica_or_instance_load_entries_per_node': {'key': 'SecondaryReplicaOrInstanceLoadEntriesPerNode', 'type': '[ReplicaMetricLoadDescription]'}, } - def __init__(self, *, partition_id: str=None, primary_replica_load_entries=None, secondary_replicas_or_instances_load_entries=None, secondary_replica_or_instance_load_entries_per_node=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + primary_replica_load_entries: Optional[List["MetricLoadDescription"]] = None, + secondary_replicas_or_instances_load_entries: Optional[List["MetricLoadDescription"]] = None, + secondary_replica_or_instance_load_entries_per_node: Optional[List["ReplicaMetricLoadDescription"]] = None, + **kwargs + ): super(PartitionMetricLoadDescription, self).__init__(**kwargs) self.partition_id = partition_id self.primary_replica_load_entries = primary_replica_load_entries @@ -15253,23 +18684,42 @@ class PartitionNewHealthReportEvent(PartitionEvent): All required parameters 
must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
+ :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -15283,17 +18733,16 @@ class PartitionNewHealthReportEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -15306,11 +18755,11 @@ class PartitionNewHealthReportEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -15322,8 +18771,26 @@ class PartitionNewHealthReportEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(PartitionNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, 
**kwargs) + self.kind = 'PartitionNewHealthReport' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -15332,7 +18799,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, sou self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'PartitionNewHealthReport' class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): @@ -15340,28 +18806,47 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + 
"StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. 
Metadata about an Analysis Event. :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata :param when_move_completed: Required. Time when the move was completed. - :type when_move_completed: datetime + :type when_move_completed: ~datetime.datetime :param previous_node: Required. The name of a Service Fabric node. :type previous_node: str :param current_node: Required. The name of a Service Fabric node. @@ -15373,9 +18858,9 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, 'when_move_completed': {'required': True}, @@ -15386,11 +18871,11 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, 'when_move_completed': {'key': 'WhenMoveCompleted', 'type': 'iso-8601'}, @@ -15400,27 +18885,40 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): 'relevant_traces': {'key': 'RelevantTraces', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, metadata, when_move_completed, previous_node: str, current_node: str, move_reason: str, relevant_traces: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + metadata: "AnalysisEventMetadata", + when_move_completed: 
datetime.datetime, + previous_node: str, + current_node: str, + move_reason: str, + relevant_traces: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(PartitionPrimaryMoveAnalysisEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, metadata=metadata, **kwargs) + self.kind = 'PartitionPrimaryMoveAnalysis' # type: str self.when_move_completed = when_move_completed self.previous_node = previous_node self.current_node = current_node self.move_reason = move_reason self.relevant_traces = relevant_traces - self.kind = 'PartitionPrimaryMoveAnalysis' -class PartitionQuorumLossProgress(Model): +class PartitionQuorumLossProgress(msrest.serialization.Model): """Information about a partition quorum loss user-induced operation. - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_quorum_loss_result: Represents information about an - operation in a terminal state (Completed or Faulted). - :type invoke_quorum_loss_result: - ~azure.servicefabric.models.InvokeQuorumLossResult + :param invoke_quorum_loss_result: Represents information about an operation in a terminal state + (Completed or Faulted). 
+ :type invoke_quorum_loss_result: ~azure.servicefabric.models.InvokeQuorumLossResult """ _attribute_map = { @@ -15428,7 +18926,13 @@ class PartitionQuorumLossProgress(Model): 'invoke_quorum_loss_result': {'key': 'InvokeQuorumLossResult', 'type': 'InvokeQuorumLossResult'}, } - def __init__(self, *, state=None, invoke_quorum_loss_result=None, **kwargs) -> None: + def __init__( + self, + *, + state: Optional[Union[str, "OperationState"]] = None, + invoke_quorum_loss_result: Optional["InvokeQuorumLossResult"] = None, + **kwargs + ): super(PartitionQuorumLossProgress, self).__init__(**kwargs) self.state = state self.invoke_quorum_loss_result = invoke_quorum_loss_result @@ -15439,23 +18943,42 @@ class PartitionReconfiguredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", 
"ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. 
The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str @@ -15486,9 +19009,9 @@ class PartitionReconfiguredEvent(PartitionEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, @@ -15506,11 +19029,11 @@ class PartitionReconfiguredEvent(PartitionEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, @@ -15527,8 +19050,31 @@ class PartitionReconfiguredEvent(PartitionEvent): 'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, node_name: str, node_instance_id: str, service_type: str, cc_epoch_data_loss_version: int, cc_epoch_config_version: int, reconfig_type: str, result: str, phase0_duration_ms: float, phase1_duration_ms: float, phase2_duration_ms: float, phase3_duration_ms: float, phase4_duration_ms: float, total_duration_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + node_name: str, + node_instance_id: str, + service_type: str, + 
cc_epoch_data_loss_version: int, + cc_epoch_config_version: int, + reconfig_type: str, + result: str, + phase0_duration_ms: float, + phase1_duration_ms: float, + phase2_duration_ms: float, + phase3_duration_ms: float, + phase4_duration_ms: float, + total_duration_ms: float, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(PartitionReconfiguredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.kind = 'PartitionReconfigured' # type: str self.node_name = node_name self.node_instance_id = node_instance_id self.service_type = service_type @@ -15542,20 +19088,17 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, nod self.phase3_duration_ms = phase3_duration_ms self.phase4_duration_ms = phase4_duration_ms self.total_duration_ms = total_duration_ms - self.kind = 'PartitionReconfigured' -class PartitionRestartProgress(Model): +class PartitionRestartProgress(msrest.serialization.Model): """Information about a partition restart user-induced operation. - :param state: The state of the operation. Possible values include: - 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', - 'ForceCancelled' + :param state: The state of the operation. Possible values include: "Invalid", "Running", + "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". :type state: str or ~azure.servicefabric.models.OperationState - :param restart_partition_result: Represents information about an operation - in a terminal state (Completed or Faulted). - :type restart_partition_result: - ~azure.servicefabric.models.RestartPartitionResult + :param restart_partition_result: Represents information about an operation in a terminal state + (Completed or Faulted). 
+ :type restart_partition_result: ~azure.servicefabric.models.RestartPartitionResult """ _attribute_map = { @@ -15563,43 +19106,49 @@ class PartitionRestartProgress(Model): 'restart_partition_result': {'key': 'RestartPartitionResult', 'type': 'RestartPartitionResult'}, } - def __init__(self, *, state=None, restart_partition_result=None, **kwargs) -> None: + def __init__( + self, + *, + state: Optional[Union[str, "OperationState"]] = None, + restart_partition_result: Optional["RestartPartitionResult"] = None, + **kwargs + ): super(PartitionRestartProgress, self).__init__(**kwargs) self.state = state self.restart_partition_result = restart_partition_result class PartitionsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the partitions of a service, containing - health evaluations for each unhealthy partition that impacts current - aggregated health state. Can be returned when evaluating service health and - the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for the partitions of a service, containing health evaluations for each unhealthy partition that impacts current aggregated health state. Can be returned when evaluating service health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. 
This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_partitions_per_service: Maximum allowed - percentage of unhealthy partitions per service from the - ServiceTypeHealthPolicy. + :param max_percent_unhealthy_partitions_per_service: Maximum allowed percentage of unhealthy + partitions per service from the ServiceTypeHealthPolicy. :type max_percent_unhealthy_partitions_per_service: int - :param total_count: Total number of partitions of the service from the - health store. + :param total_count: Total number of partitions of the service from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - PartitionHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy PartitionHealthEvaluation that impacted the aggregated + health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -15607,35 +19156,43 @@ class PartitionsHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_partitions_per_service: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + max_percent_unhealthy_partitions_per_service: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(PartitionsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Partitions' # type: str self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Partitions' -class ReplicatorStatus(Model): +class ReplicatorStatus(msrest.serialization.Model): """Represents a base 
class for primary or secondary replicator status. - Contains information about the service fabric replicator like the - replication/copy queue utilization, last acknowledgement received - timestamp, etc. +Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus + sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole """ _validation = { @@ -15650,27 +19207,27 @@ class ReplicatorStatus(Model): 'kind': {'Primary': 'PrimaryReplicatorStatus', 'SecondaryReplicatorStatus': 'SecondaryReplicatorStatus'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ReplicatorStatus, self).__init__(**kwargs) - self.kind = None + self.kind = None # type: Optional[str] class PrimaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is - functioning in a Primary role. + """Provides statistics about the Service Fabric Replicator, when it is functioning in a Primary role. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the primary replicator. 
- :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param remote_replicators: The status of all the active and idle secondary - replicators that the primary is aware of. - :type remote_replicators: - list[~azure.servicefabric.models.RemoteReplicatorStatus] + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the primary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param remote_replicators: The status of all the active and idle secondary replicators that the + primary is aware of. + :type remote_replicators: list[~azure.servicefabric.models.RemoteReplicatorStatus] """ _validation = { @@ -15683,30 +19240,34 @@ class PrimaryReplicatorStatus(ReplicatorStatus): 'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'}, } - def __init__(self, *, replication_queue_status=None, remote_replicators=None, **kwargs) -> None: + def __init__( + self, + *, + replication_queue_status: Optional["ReplicatorQueueStatus"] = None, + remote_replicators: Optional[List["RemoteReplicatorStatus"]] = None, + **kwargs + ): super(PrimaryReplicatorStatus, self).__init__(**kwargs) + self.kind = 'Primary' # type: str self.replication_queue_status = replication_queue_status self.remote_replicators = remote_replicators - self.kind = 'Primary' -class Probe(Model): +class Probe(msrest.serialization.Model): """Probes have a number of fields that you can use to control their behavior. - :param initial_delay_seconds: The initial delay in seconds to start - executing probe once codepackage has started. Default value: 0 . 
+ :param initial_delay_seconds: The initial delay in seconds to start executing probe once + codepackage has started. :type initial_delay_seconds: int - :param period_seconds: Periodic seconds to execute probe. Default value: - 10 . + :param period_seconds: Periodic seconds to execute probe. :type period_seconds: int - :param timeout_seconds: Period after which probe is considered as failed - if it hasn't completed successfully. Default value: 1 . + :param timeout_seconds: Period after which probe is considered as failed if it hasn't completed + successfully. :type timeout_seconds: int - :param success_threshold: The count of successful probe executions after - which probe is considered success. Default value: 1 . + :param success_threshold: The count of successful probe executions after which probe is + considered success. :type success_threshold: int - :param failure_threshold: The count of failures after which probe is - considered failed. Default value: 3 . + :param failure_threshold: The count of failures after which probe is considered failed. :type failure_threshold: int :param exec_property: Exec command to run inside the container. 
:type exec_property: ~azure.servicefabric.models.ProbeExec @@ -15727,7 +19288,19 @@ class Probe(Model): 'tcp_socket': {'key': 'tcpSocket', 'type': 'ProbeTcpSocket'}, } - def __init__(self, *, initial_delay_seconds: int=0, period_seconds: int=10, timeout_seconds: int=1, success_threshold: int=1, failure_threshold: int=3, exec_property=None, http_get=None, tcp_socket=None, **kwargs) -> None: + def __init__( + self, + *, + initial_delay_seconds: Optional[int] = 0, + period_seconds: Optional[int] = 10, + timeout_seconds: Optional[int] = 1, + success_threshold: Optional[int] = 1, + failure_threshold: Optional[int] = 3, + exec_property: Optional["ProbeExec"] = None, + http_get: Optional["ProbeHttpGet"] = None, + tcp_socket: Optional["ProbeTcpSocket"] = None, + **kwargs + ): super(Probe, self).__init__(**kwargs) self.initial_delay_seconds = initial_delay_seconds self.period_seconds = period_seconds @@ -15739,13 +19312,13 @@ def __init__(self, *, initial_delay_seconds: int=0, period_seconds: int=10, time self.tcp_socket = tcp_socket -class ProbeExec(Model): +class ProbeExec(msrest.serialization.Model): """Exec command to run inside the container. All required parameters must be populated in order to send to Azure. - :param command: Required. Comma separated command to run inside the - container for example "sh, -c, echo hello world". + :param command: Required. Comma separated command to run inside the container for example "sh, + -c, echo hello world". :type command: str """ @@ -15757,12 +19330,17 @@ class ProbeExec(Model): 'command': {'key': 'command', 'type': 'str'}, } - def __init__(self, *, command: str, **kwargs) -> None: + def __init__( + self, + *, + command: str, + **kwargs + ): super(ProbeExec, self).__init__(**kwargs) self.command = command -class ProbeHttpGet(Model): +class ProbeHttpGet(msrest.serialization.Model): """Http probe for the container. All required parameters must be populated in order to send to Azure. 
@@ -15775,8 +19353,8 @@ class ProbeHttpGet(Model): :type host: str :param http_headers: Headers to set in the request. :type http_headers: list[~azure.servicefabric.models.ProbeHttpGetHeaders] - :param scheme: Scheme for the http probe. Can be Http or Https. Possible - values include: 'http', 'https' + :param scheme: Scheme for the http probe. Can be Http or Https. Possible values include: + "http", "https". :type scheme: str or ~azure.servicefabric.models.Scheme """ @@ -15792,7 +19370,16 @@ class ProbeHttpGet(Model): 'scheme': {'key': 'scheme', 'type': 'str'}, } - def __init__(self, *, port: int, path: str=None, host: str=None, http_headers=None, scheme=None, **kwargs) -> None: + def __init__( + self, + *, + port: int, + path: Optional[str] = None, + host: Optional[str] = None, + http_headers: Optional[List["ProbeHttpGetHeaders"]] = None, + scheme: Optional[Union[str, "Scheme"]] = None, + **kwargs + ): super(ProbeHttpGet, self).__init__(**kwargs) self.port = port self.path = path @@ -15801,7 +19388,7 @@ def __init__(self, *, port: int, path: str=None, host: str=None, http_headers=No self.scheme = scheme -class ProbeHttpGetHeaders(Model): +class ProbeHttpGetHeaders(msrest.serialization.Model): """Http headers. All required parameters must be populated in order to send to Azure. @@ -15822,13 +19409,19 @@ class ProbeHttpGetHeaders(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, *, name: str, value: str, **kwargs) -> None: + def __init__( + self, + *, + name: str, + value: str, + **kwargs + ): super(ProbeHttpGetHeaders, self).__init__(**kwargs) self.name = name self.value = value -class ProbeTcpSocket(Model): +class ProbeTcpSocket(msrest.serialization.Model): """Tcp port to probe inside the container. All required parameters must be populated in order to send to Azure. 
@@ -15845,14 +19438,18 @@ class ProbeTcpSocket(Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__(self, *, port: int, **kwargs) -> None: + def __init__( + self, + *, + port: int, + **kwargs + ): super(ProbeTcpSocket, self).__init__(**kwargs) self.port = port -class PropertyBatchDescriptionList(Model): - """Describes a list of property batch operations to be executed. Either all or - none of the operations will be committed. +class PropertyBatchDescriptionList(msrest.serialization.Model): + """Describes a list of property batch operations to be executed. Either all or none of the operations will be committed. :param operations: A list of the property batch operations to be executed. :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] @@ -15862,20 +19459,25 @@ class PropertyBatchDescriptionList(Model): 'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'}, } - def __init__(self, *, operations=None, **kwargs) -> None: + def __init__( + self, + *, + operations: Optional[List["PropertyBatchOperation"]] = None, + **kwargs + ): super(PropertyBatchDescriptionList, self).__init__(**kwargs) self.operations = operations -class PropertyDescription(Model): +class PropertyDescription(msrest.serialization.Model): """Description of a Service Fabric property. All required parameters must be populated in order to send to Azure. :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param custom_type_id: The property's custom type ID. Using this property, - the user is able to tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. Using this property, the user is able to + tag the type of the value of the property. :type custom_type_id: str :param value: Required. Describes a Service Fabric property value. 
:type value: ~azure.servicefabric.models.PropertyValue @@ -15892,14 +19494,21 @@ class PropertyDescription(Model): 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__(self, *, property_name: str, value, custom_type_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + value: "PropertyValue", + custom_type_id: Optional[str] = None, + **kwargs + ): super(PropertyDescription, self).__init__(**kwargs) self.property_name = property_name self.custom_type_id = custom_type_id self.value = value -class PropertyInfo(Model): +class PropertyInfo(msrest.serialization.Model): """Information about a Service Fabric property. All required parameters must be populated in order to send to Azure. @@ -15908,8 +19517,8 @@ class PropertyInfo(Model): :type name: str :param value: Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param metadata: Required. The metadata associated with a property, - including the property's name. + :param metadata: Required. The metadata associated with a property, including the property's + name. :type metadata: ~azure.servicefabric.models.PropertyMetadata """ @@ -15924,33 +19533,39 @@ class PropertyInfo(Model): 'metadata': {'key': 'Metadata', 'type': 'PropertyMetadata'}, } - def __init__(self, *, name: str, metadata, value=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + metadata: "PropertyMetadata", + value: Optional["PropertyValue"] = None, + **kwargs + ): super(PropertyInfo, self).__init__(**kwargs) self.name = name self.value = value self.metadata = metadata -class PropertyMetadata(Model): +class PropertyMetadata(msrest.serialization.Model): """The metadata associated with a property, including the property's name. - :param type_id: The kind of property, determined by the type of data. - Following are the possible values. 
Possible values include: 'Invalid', - 'Binary', 'Int64', 'Double', 'String', 'Guid' + :param type_id: The kind of property, determined by the type of data. Following are the + possible values. Possible values include: "Invalid", "Binary", "Int64", "Double", "String", + "Guid". :type type_id: str or ~azure.servicefabric.models.PropertyValueKind :param custom_type_id: The property's custom type ID. :type custom_type_id: str - :param parent: The name of the parent Service Fabric Name for the - property. It could be thought of as the name-space/table under which the - property exists. + :param parent: The name of the parent Service Fabric Name for the property. It could be thought + of as the name-space/table under which the property exists. :type parent: str :param size_in_bytes: The length of the serialized property value. :type size_in_bytes: int - :param last_modified_utc_timestamp: Represents when the Property was last - modified. Only write operations will cause this field to be updated. - :type last_modified_utc_timestamp: datetime - :param sequence_number: The version of the property. Every time a property - is modified, its sequence number is increased. + :param last_modified_utc_timestamp: Represents when the Property was last modified. Only write + operations will cause this field to be updated. + :type last_modified_utc_timestamp: ~datetime.datetime + :param sequence_number: The version of the property. Every time a property is modified, its + sequence number is increased. 
:type sequence_number: str """ @@ -15963,7 +19578,17 @@ class PropertyMetadata(Model): 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__(self, *, type_id=None, custom_type_id: str=None, parent: str=None, size_in_bytes: int=None, last_modified_utc_timestamp=None, sequence_number: str=None, **kwargs) -> None: + def __init__( + self, + *, + type_id: Optional[Union[str, "PropertyValueKind"]] = None, + custom_type_id: Optional[str] = None, + parent: Optional[str] = None, + size_in_bytes: Optional[int] = None, + last_modified_utc_timestamp: Optional[datetime.datetime] = None, + sequence_number: Optional[str] = None, + **kwargs + ): super(PropertyMetadata, self).__init__(**kwargs) self.type_id = type_id self.custom_type_id = custom_type_id @@ -15974,52 +19599,58 @@ def __init__(self, *, type_id=None, custom_type_id: str=None, parent: str=None, class ProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to register or provision an application type using - an application package uploaded to the Service Fabric image store. + """Describes the operation to register or provision an application type using an application package uploaded to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param async_property: Required. Indicates whether or not provisioning - should occur asynchronously. When set to true, the provision operation - returns when the request is accepted by the system, and the provision - operation continues without any timeout limit. The default value is false. - For large application packages, we recommend setting the value to true. + :param kind: Required. The kind of application type registration or provision requested. The + application package can be registered or provisioned either from the image store or from an + external store. Following are the kinds of the application type provision.Constant filled by + server. 
Possible values include: "Invalid", "ImageStorePath", "ExternalStore". + :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind + :param async_property: Required. Indicates whether or not provisioning should occur + asynchronously. When set to true, the provision operation returns when the request is accepted + by the system, and the provision operation continues without any timeout limit. The default + value is false. For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param application_type_build_path: Required. The relative path for the - application package in the image store specified during the prior upload - operation. + :param application_type_build_path: Required. The relative path for the application package in + the image store specified during the prior upload operation. :type application_type_build_path: str - :param application_package_cleanup_policy: The kind of action that needs - to be taken for cleaning up the application package after successful - provision. Possible values include: 'Invalid', 'Default', 'Automatic', - 'Manual' + :param application_package_cleanup_policy: The kind of action that needs to be taken for + cleaning up the application package after successful provision. Possible values include: + "Invalid", "Default", "Automatic", "Manual". 
:type application_package_cleanup_policy: str or ~azure.servicefabric.models.ApplicationPackageCleanupPolicy """ _validation = { - 'async_property': {'required': True}, 'kind': {'required': True}, + 'async_property': {'required': True}, 'application_type_build_path': {'required': True}, } _attribute_map = { - 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, 'application_package_cleanup_policy': {'key': 'ApplicationPackageCleanupPolicy', 'type': 'str'}, } - def __init__(self, *, async_property: bool, application_type_build_path: str, application_package_cleanup_policy=None, **kwargs) -> None: + def __init__( + self, + *, + async_property: bool, + application_type_build_path: str, + application_package_cleanup_policy: Optional[Union[str, "ApplicationPackageCleanupPolicy"]] = None, + **kwargs + ): super(ProvisionApplicationTypeDescription, self).__init__(async_property=async_property, **kwargs) + self.kind = 'ImageStorePath' # type: str self.application_type_build_path = application_type_build_path self.application_package_cleanup_policy = application_package_cleanup_policy - self.kind = 'ImageStorePath' -class ProvisionFabricDescription(Model): +class ProvisionFabricDescription(msrest.serialization.Model): """Describes the parameters for provisioning a cluster. :param code_file_path: The cluster code package file path. 
@@ -16033,7 +19664,13 @@ class ProvisionFabricDescription(Model): 'cluster_manifest_file_path': {'key': 'ClusterManifestFilePath', 'type': 'str'}, } - def __init__(self, *, code_file_path: str=None, cluster_manifest_file_path: str=None, **kwargs) -> None: + def __init__( + self, + *, + code_file_path: Optional[str] = None, + cluster_manifest_file_path: Optional[str] = None, + **kwargs + ): super(ProvisionFabricDescription, self).__init__(**kwargs) self.code_file_path = code_file_path self.cluster_manifest_file_path = cluster_manifest_file_path @@ -16041,66 +19678,68 @@ def __init__(self, *, code_file_path: str=None, cluster_manifest_file_path: str= class PutPropertyBatchOperation(PropertyBatchOperation): """Puts the specified property under the specified name. - Note that if one PropertyBatchOperation in a PropertyBatch fails, - the entire batch fails and cannot be committed in a transactional manner. +Note that if one PropertyBatchOperation in a PropertyBatch fails, +the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. + :param kind: Required. The kind of property batch operation, determined by the operation to be + performed. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". + :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Required. Constant filled by server. - :type kind: str :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param custom_type_id: The property's custom type ID. Using this property, - the user is able to tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. 
Using this property, the user is able to + tag the type of the value of the property. :type custom_type_id: str """ _validation = { - 'property_name': {'required': True}, 'kind': {'required': True}, + 'property_name': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'property_name': {'key': 'PropertyName', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, } - def __init__(self, *, property_name: str, value, custom_type_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + property_name: str, + value: "PropertyValue", + custom_type_id: Optional[str] = None, + **kwargs + ): super(PutPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.kind = 'Put' # type: str self.value = value self.custom_type_id = custom_type_id - self.kind = 'Put' - - -class ReconfigurationInformation(Model): - """Information about current reconfiguration like phase, type, previous - configuration role of replica and reconfiguration start date time. - - :param previous_configuration_role: Replica role before reconfiguration - started. Possible values include: 'Unknown', 'None', 'Primary', - 'IdleSecondary', 'ActiveSecondary' - :type previous_configuration_role: str or - ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_phase: Current phase of ongoing reconfiguration. If - no reconfiguration is taking place then this value will be "None". - Possible values include: 'Unknown', 'None', 'Phase0', 'Phase1', 'Phase2', - 'Phase3', 'Phase4', 'AbortPhaseZero' - :type reconfiguration_phase: str or - ~azure.servicefabric.models.ReconfigurationPhase - :param reconfiguration_type: Type of current ongoing reconfiguration. If - no reconfiguration is taking place then this value will be "None". 
- Possible values include: 'Unknown', 'SwapPrimary', 'Failover', 'Other' - :type reconfiguration_type: str or - ~azure.servicefabric.models.ReconfigurationType - :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing - reconfiguration. If no reconfiguration is taking place then this value - will be zero date-time. - :type reconfiguration_start_time_utc: datetime + + +class ReconfigurationInformation(msrest.serialization.Model): + """Information about current reconfiguration like phase, type, previous configuration role of replica and reconfiguration start date time. + + :param previous_configuration_role: Replica role before reconfiguration started. Possible + values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type previous_configuration_role: str or ~azure.servicefabric.models.ReplicaRole + :param reconfiguration_phase: Current phase of ongoing reconfiguration. If no reconfiguration + is taking place then this value will be "None". Possible values include: "Unknown", "None", + "Phase0", "Phase1", "Phase2", "Phase3", "Phase4", "AbortPhaseZero". + :type reconfiguration_phase: str or ~azure.servicefabric.models.ReconfigurationPhase + :param reconfiguration_type: Type of current ongoing reconfiguration. If no reconfiguration is + taking place then this value will be "None". Possible values include: "Unknown", "SwapPrimary", + "Failover", "Other". + :type reconfiguration_type: str or ~azure.servicefabric.models.ReconfigurationType + :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing reconfiguration. If + no reconfiguration is taking place then this value will be zero date-time. 
+ :type reconfiguration_start_time_utc: ~datetime.datetime """ _attribute_map = { @@ -16110,7 +19749,15 @@ class ReconfigurationInformation(Model): 'reconfiguration_start_time_utc': {'key': 'ReconfigurationStartTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, *, previous_configuration_role=None, reconfiguration_phase=None, reconfiguration_type=None, reconfiguration_start_time_utc=None, **kwargs) -> None: + def __init__( + self, + *, + previous_configuration_role: Optional[Union[str, "ReplicaRole"]] = None, + reconfiguration_phase: Optional[Union[str, "ReconfigurationPhase"]] = None, + reconfiguration_type: Optional[Union[str, "ReconfigurationType"]] = None, + reconfiguration_start_time_utc: Optional[datetime.datetime] = None, + **kwargs + ): super(ReconfigurationInformation, self).__init__(**kwargs) self.previous_configuration_role = previous_configuration_role self.reconfiguration_phase = reconfiguration_phase @@ -16118,16 +19765,14 @@ def __init__(self, *, previous_configuration_role=None, reconfiguration_phase=No self.reconfiguration_start_time_utc = reconfiguration_start_time_utc -class RegistryCredential(Model): +class RegistryCredential(msrest.serialization.Model): """Credential information to connect to container registry. :param registry_user_name: The user name to connect to container registry. :type registry_user_name: str - :param registry_password: The password for supplied username to connect to - container registry. + :param registry_password: The password for supplied username to connect to container registry. :type registry_password: str - :param password_encrypted: Indicates that supplied container registry - password is encrypted. + :param password_encrypted: Indicates that supplied container registry password is encrypted. 
:type password_encrypted: bool """ @@ -16137,25 +19782,31 @@ class RegistryCredential(Model): 'password_encrypted': {'key': 'PasswordEncrypted', 'type': 'bool'}, } - def __init__(self, *, registry_user_name: str=None, registry_password: str=None, password_encrypted: bool=None, **kwargs) -> None: + def __init__( + self, + *, + registry_user_name: Optional[str] = None, + registry_password: Optional[str] = None, + password_encrypted: Optional[bool] = None, + **kwargs + ): super(RegistryCredential, self).__init__(**kwargs) self.registry_user_name = registry_user_name self.registry_password = registry_password self.password_encrypted = password_encrypted -class ReliableCollectionsRef(Model): +class ReliableCollectionsRef(msrest.serialization.Model): """Specifying this parameter adds support for reliable collections. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of ReliableCollection resource. Right now it's - not used and you can use any string. + :param name: Required. Name of ReliableCollection resource. Right now it's not used and you can + use any string. :type name: str - :param do_not_persist_state: False (the default) if ReliableCollections - state is persisted to disk as usual. True if you do not want to persist - state, in which case replication is still enabled and you can use - ReliableCollections as distributed cache. + :param do_not_persist_state: False (the default) if ReliableCollections state is persisted to + disk as usual. True if you do not want to persist state, in which case replication is still + enabled and you can use ReliableCollections as distributed cache. 
:type do_not_persist_state: bool """ @@ -16168,28 +19819,32 @@ class ReliableCollectionsRef(Model): 'do_not_persist_state': {'key': 'doNotPersistState', 'type': 'bool'}, } - def __init__(self, *, name: str, do_not_persist_state: bool=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + do_not_persist_state: Optional[bool] = None, + **kwargs + ): super(ReliableCollectionsRef, self).__init__(**kwargs) self.name = name self.do_not_persist_state = do_not_persist_state -class RemoteReplicatorAcknowledgementDetail(Model): - """Provides various statistics of the acknowledgements that are being received - from the remote replicator. +class RemoteReplicatorAcknowledgementDetail(msrest.serialization.Model): + """Provides various statistics of the acknowledgements that are being received from the remote replicator. - :param average_receive_duration: Represents the average duration it takes - for the remote replicator to receive an operation. + :param average_receive_duration: Represents the average duration it takes for the remote + replicator to receive an operation. :type average_receive_duration: str - :param average_apply_duration: Represents the average duration it takes - for the remote replicator to apply an operation. This usually entails - writing the operation to disk. + :param average_apply_duration: Represents the average duration it takes for the remote + replicator to apply an operation. This usually entails writing the operation to disk. :type average_apply_duration: str - :param not_received_count: Represents the number of operations not yet - received by a remote replicator. + :param not_received_count: Represents the number of operations not yet received by a remote + replicator. :type not_received_count: str - :param received_and_not_applied_count: Represents the number of operations - received and not yet applied by a remote replicator. 
+ :param received_and_not_applied_count: Represents the number of operations received and not yet + applied by a remote replicator. :type received_and_not_applied_count: str """ @@ -16200,7 +19855,15 @@ class RemoteReplicatorAcknowledgementDetail(Model): 'received_and_not_applied_count': {'key': 'ReceivedAndNotAppliedCount', 'type': 'str'}, } - def __init__(self, *, average_receive_duration: str=None, average_apply_duration: str=None, not_received_count: str=None, received_and_not_applied_count: str=None, **kwargs) -> None: + def __init__( + self, + *, + average_receive_duration: Optional[str] = None, + average_apply_duration: Optional[str] = None, + not_received_count: Optional[str] = None, + received_and_not_applied_count: Optional[str] = None, + **kwargs + ): super(RemoteReplicatorAcknowledgementDetail, self).__init__(**kwargs) self.average_receive_duration = average_receive_duration self.average_apply_duration = average_apply_duration @@ -16208,17 +19871,15 @@ def __init__(self, *, average_receive_duration: str=None, average_apply_duration self.received_and_not_applied_count = received_and_not_applied_count -class RemoteReplicatorAcknowledgementStatus(Model): - """Provides details about the remote replicators from the primary replicator's - point of view. +class RemoteReplicatorAcknowledgementStatus(msrest.serialization.Model): + """Provides details about the remote replicators from the primary replicator's point of view. - :param replication_stream_acknowledgement_detail: Details about the - acknowledgements for operations that are part of the replication stream - data. + :param replication_stream_acknowledgement_detail: Details about the acknowledgements for + operations that are part of the replication stream data. 
:type replication_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail - :param copy_stream_acknowledgement_detail: Details about the - acknowledgements for operations that are part of the copy stream data. + :param copy_stream_acknowledgement_detail: Details about the acknowledgements for operations + that are part of the copy stream data. :type copy_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail """ @@ -16228,46 +19889,48 @@ class RemoteReplicatorAcknowledgementStatus(Model): 'copy_stream_acknowledgement_detail': {'key': 'CopyStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, } - def __init__(self, *, replication_stream_acknowledgement_detail=None, copy_stream_acknowledgement_detail=None, **kwargs) -> None: + def __init__( + self, + *, + replication_stream_acknowledgement_detail: Optional["RemoteReplicatorAcknowledgementDetail"] = None, + copy_stream_acknowledgement_detail: Optional["RemoteReplicatorAcknowledgementDetail"] = None, + **kwargs + ): super(RemoteReplicatorAcknowledgementStatus, self).__init__(**kwargs) self.replication_stream_acknowledgement_detail = replication_stream_acknowledgement_detail self.copy_stream_acknowledgement_detail = copy_stream_acknowledgement_detail -class RemoteReplicatorStatus(Model): - """Represents the state of the secondary replicator from the primary - replicator’s point of view. +class RemoteReplicatorStatus(msrest.serialization.Model): + """Represents the state of the secondary replicator from the primary replicator’s point of view. - :param replica_id: Represents the replica ID of the remote secondary - replicator. + :param replica_id: Represents the replica ID of the remote secondary replicator. :type replica_id: str - :param last_acknowledgement_processed_time_utc: The last timestamp (in - UTC) when an acknowledgement from the secondary replicator was processed - on the primary. 
- UTC 0 represents an invalid value, indicating that no acknowledgement - messages were ever processed. - :type last_acknowledgement_processed_time_utc: datetime - :param last_received_replication_sequence_number: The highest replication - operation sequence number that the secondary has received from the - primary. + :param last_acknowledgement_processed_time_utc: The last timestamp (in UTC) when an + acknowledgement from the secondary replicator was processed on the primary. + UTC 0 represents an invalid value, indicating that no acknowledgement messages were ever + processed. + :type last_acknowledgement_processed_time_utc: ~datetime.datetime + :param last_received_replication_sequence_number: The highest replication operation sequence + number that the secondary has received from the primary. :type last_received_replication_sequence_number: str - :param last_applied_replication_sequence_number: The highest replication - operation sequence number that the secondary has applied to its state. + :param last_applied_replication_sequence_number: The highest replication operation sequence + number that the secondary has applied to its state. :type last_applied_replication_sequence_number: str - :param is_in_build: A value that indicates whether the secondary replica - is in the process of being built. + :param is_in_build: A value that indicates whether the secondary replica is in the process of + being built. :type is_in_build: bool - :param last_received_copy_sequence_number: The highest copy operation - sequence number that the secondary has received from the primary. + :param last_received_copy_sequence_number: The highest copy operation sequence number that the + secondary has received from the primary. A value of -1 implies that the secondary has received all copy operations. :type last_received_copy_sequence_number: str - :param last_applied_copy_sequence_number: The highest copy operation - sequence number that the secondary has applied to its state. 
- A value of -1 implies that the secondary has applied all copy operations - and the copy process is complete. + :param last_applied_copy_sequence_number: The highest copy operation sequence number that the + secondary has applied to its state. + A value of -1 implies that the secondary has applied all copy operations and the copy process + is complete. :type last_applied_copy_sequence_number: str - :param remote_replicator_acknowledgement_status: Represents the - acknowledgment status for the remote secondary replicator. + :param remote_replicator_acknowledgement_status: Represents the acknowledgment status for the + remote secondary replicator. :type remote_replicator_acknowledgement_status: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementStatus """ @@ -16283,7 +19946,19 @@ class RemoteReplicatorStatus(Model): 'remote_replicator_acknowledgement_status': {'key': 'RemoteReplicatorAcknowledgementStatus', 'type': 'RemoteReplicatorAcknowledgementStatus'}, } - def __init__(self, *, replica_id: str=None, last_acknowledgement_processed_time_utc=None, last_received_replication_sequence_number: str=None, last_applied_replication_sequence_number: str=None, is_in_build: bool=None, last_received_copy_sequence_number: str=None, last_applied_copy_sequence_number: str=None, remote_replicator_acknowledgement_status=None, **kwargs) -> None: + def __init__( + self, + *, + replica_id: Optional[str] = None, + last_acknowledgement_processed_time_utc: Optional[datetime.datetime] = None, + last_received_replication_sequence_number: Optional[str] = None, + last_applied_replication_sequence_number: Optional[str] = None, + is_in_build: Optional[bool] = None, + last_received_copy_sequence_number: Optional[str] = None, + last_applied_copy_sequence_number: Optional[str] = None, + remote_replicator_acknowledgement_status: Optional["RemoteReplicatorAcknowledgementStatus"] = None, + **kwargs + ): super(RemoteReplicatorStatus, self).__init__(**kwargs) self.replica_id = replica_id 
self.last_acknowledgement_processed_time_utc = last_acknowledgement_processed_time_utc @@ -16295,95 +19970,87 @@ def __init__(self, *, replica_id: str=None, last_acknowledgement_processed_time_ self.remote_replicator_acknowledgement_status = remote_replicator_acknowledgement_status -class RepairTask(Model): - """Represents a repair task, which includes information about what kind of - repair was requested, what its progress is, and what its final result was. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. +class RepairTask(msrest.serialization.Model): + """Represents a repair task, which includes information about what kind of repair was requested, what its progress is, and what its final result was. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str :param version: The version of the repair task. - When creating a new repair task, the version must be set to zero. When - updating a repair task, + When creating a new repair task, the version must be set to zero. When updating a repair + task, the version is used for optimistic concurrency checks. If the version is - set to zero, the update will not check for write conflicts. If the - version is set to a non-zero value, then the - update will only succeed if the actual current version of the repair task - matches this value. + set to zero, the update will not check for write conflicts. If the version is set to a + non-zero value, then the + update will only succeed if the actual current version of the repair task matches this value. :type version: str - :param description: A description of the purpose of the repair task, or - other informational details. + :param description: A description of the purpose of the repair task, or other informational + details. 
May be set when the repair task is created, and is immutable once set. :type description: str - :param state: Required. The workflow state of the repair task. Valid - initial states are Created, Claimed, and Preparing. Possible values - include: 'Invalid', 'Created', 'Claimed', 'Preparing', 'Approved', - 'Executing', 'Restoring', 'Completed' + :param state: Required. The workflow state of the repair task. Valid initial states are + Created, Claimed, and Preparing. Possible values include: "Invalid", "Created", "Claimed", + "Preparing", "Approved", "Executing", "Restoring", "Completed". :type state: str or ~azure.servicefabric.models.State - :param flags: A bitwise-OR of the following values, which gives additional - details about the status of the repair task. - - 1 - Cancellation of the repair has been requested - - 2 - Abort of the repair has been requested - - 4 - Approval of the repair was forced via client request + :param flags: A bitwise-OR of the following values, which gives additional details about the + status of the repair task. + + + * 1 - Cancellation of the repair has been requested + * 2 - Abort of the repair has been requested + * 4 - Approval of the repair was forced via client request. :type flags: int - :param action: Required. The requested repair action. Must be specified - when the repair task is created, and is immutable once set. + :param action: Required. The requested repair action. Must be specified when the repair task is + created, and is immutable once set. :type action: str - :param target: The target object determines what actions the system will - take to prepare for the impact of the repair, prior to approving execution - of the repair. + :param target: The target object determines what actions the system will take to prepare for + the impact of the repair, prior to approving execution of the repair. May be set when the repair task is created, and is immutable once set. 
:type target: ~azure.servicefabric.models.RepairTargetDescriptionBase - :param executor: The name of the repair executor. Must be specified in - Claimed and later states, and is immutable once set. + :param executor: The name of the repair executor. Must be specified in Claimed and later + states, and is immutable once set. :type executor: str - :param executor_data: A data string that the repair executor can use to - store its internal state. + :param executor_data: A data string that the repair executor can use to store its internal + state. :type executor_data: str - :param impact: The impact object determines what actions the system will - take to prepare for the impact of the repair, prior to approving execution - of the repair. - Impact must be specified by the repair executor when transitioning to the - Preparing state, and is immutable once set. + :param impact: The impact object determines what actions the system will take to prepare for + the impact of the repair, prior to approving execution of the repair. + Impact must be specified by the repair executor when transitioning to the Preparing state, and + is immutable once set. :type impact: ~azure.servicefabric.models.RepairImpactDescriptionBase - :param result_status: A value describing the overall result of the repair - task execution. Must be specified in the Restoring and later states, and - is immutable once set. Possible values include: 'Invalid', 'Succeeded', - 'Cancelled', 'Interrupted', 'Failed', 'Pending' + :param result_status: A value describing the overall result of the repair task execution. Must + be specified in the Restoring and later states, and is immutable once set. Possible values + include: "Invalid", "Succeeded", "Cancelled", "Interrupted", "Failed", "Pending". :type result_status: str or ~azure.servicefabric.models.ResultStatus - :param result_code: A numeric value providing additional details about the - result of the repair task execution. 
- May be specified in the Restoring and later states, and is immutable once - set. + :param result_code: A numeric value providing additional details about the result of the repair + task execution. + May be specified in the Restoring and later states, and is immutable once set. :type result_code: int - :param result_details: A string providing additional details about the - result of the repair task execution. - May be specified in the Restoring and later states, and is immutable once - set. + :param result_details: A string providing additional details about the result of the repair + task execution. + May be specified in the Restoring and later states, and is immutable once set. :type result_details: str - :param history: An object that contains timestamps of the repair task's - state transitions. - These timestamps are updated by the system, and cannot be directly - modified. + :param history: An object that contains timestamps of the repair task's state transitions. + These timestamps are updated by the system, and cannot be directly modified. :type history: ~azure.servicefabric.models.RepairTaskHistory - :param preparing_health_check_state: The workflow state of the health - check when the repair task is in the Preparing state. Possible values - include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' + :param preparing_health_check_state: The workflow state of the health check when the repair + task is in the Preparing state. Possible values include: "NotStarted", "InProgress", + "Succeeded", "Skipped", "TimedOut". :type preparing_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param restoring_health_check_state: The workflow state of the health - check when the repair task is in the Restoring state. 
Possible values - include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' + :param restoring_health_check_state: The workflow state of the health check when the repair + task is in the Restoring state. Possible values include: "NotStarted", "InProgress", + "Succeeded", "Skipped", "TimedOut". :type restoring_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param perform_preparing_health_check: A value to determine if health - checks will be performed when the repair task enters the Preparing state. + :param perform_preparing_health_check: A value to determine if health checks will be performed + when the repair task enters the Preparing state. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A value to determine if health - checks will be performed when the repair task enters the Restoring state. + :param perform_restoring_health_check: A value to determine if health checks will be performed + when the repair task enters the Restoring state. 
:type perform_restoring_health_check: bool """ @@ -16414,7 +20081,29 @@ class RepairTask(Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__(self, *, task_id: str, state, action: str, version: str=None, description: str=None, flags: int=None, target=None, executor: str=None, executor_data: str=None, impact=None, result_status=None, result_code: int=None, result_details: str=None, history=None, preparing_health_check_state=None, restoring_health_check_state=None, perform_preparing_health_check: bool=None, perform_restoring_health_check: bool=None, **kwargs) -> None: + def __init__( + self, + *, + task_id: str, + state: Union[str, "State"], + action: str, + version: Optional[str] = None, + description: Optional[str] = None, + flags: Optional[int] = None, + target: Optional["RepairTargetDescriptionBase"] = None, + executor: Optional[str] = None, + executor_data: Optional[str] = None, + impact: Optional["RepairImpactDescriptionBase"] = None, + result_status: Optional[Union[str, "ResultStatus"]] = None, + result_code: Optional[int] = None, + result_details: Optional[str] = None, + history: Optional["RepairTaskHistory"] = None, + preparing_health_check_state: Optional[Union[str, "RepairTaskHealthCheckState"]] = None, + restoring_health_check_state: Optional[Union[str, "RepairTaskHealthCheckState"]] = None, + perform_preparing_health_check: Optional[bool] = None, + perform_restoring_health_check: Optional[bool] = None, + **kwargs + ): super(RepairTask, self).__init__(**kwargs) self.task_id = task_id self.version = version @@ -16436,19 +20125,18 @@ def __init__(self, *, task_id: str, state, action: str, version: str=None, descr self.perform_restoring_health_check = perform_restoring_health_check -class RepairTaskApproveDescription(Model): +class RepairTaskApproveDescription(msrest.serialization.Model): """Describes a request for forced approval of a repair task. 
- This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version check - is performed. + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. :type version: str """ @@ -16461,29 +20149,34 @@ class RepairTaskApproveDescription(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, *, task_id: str, version: str=None, **kwargs) -> None: + def __init__( + self, + *, + task_id: str, + version: Optional[str] = None, + **kwargs + ): super(RepairTaskApproveDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version -class RepairTaskCancelDescription(Model): +class RepairTaskCancelDescription(msrest.serialization.Model): """Describes a request to cancel a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version check - is performed. 
+ :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. :type version: str - :param request_abort: _True_ if the repair should be stopped as soon as - possible even if it has already started executing. _False_ if the repair - should be cancelled only if execution has not yet started. + :param request_abort: *True* if the repair should be stopped as soon as possible even if it has + already started executing. *False* if the repair should be cancelled only if execution has not + yet started. :type request_abort: bool """ @@ -16497,27 +20190,32 @@ class RepairTaskCancelDescription(Model): 'request_abort': {'key': 'RequestAbort', 'type': 'bool'}, } - def __init__(self, *, task_id: str, version: str=None, request_abort: bool=None, **kwargs) -> None: + def __init__( + self, + *, + task_id: str, + version: Optional[str] = None, + request_abort: Optional[bool] = None, + **kwargs + ): super(RepairTaskCancelDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version self.request_abort = request_abort -class RepairTaskDeleteDescription(Model): +class RepairTaskDeleteDescription(msrest.serialization.Model): """Describes a request to delete a completed repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. - :param task_id: Required. The ID of the completed repair task to be - deleted. + :param task_id: Required. The ID of the completed repair task to be deleted. :type task_id: str - :param version: The current version number of the repair task. 
If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version check - is performed. + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current version of the repair task. If zero, + then no version check is performed. :type version: str """ @@ -16530,50 +20228,49 @@ class RepairTaskDeleteDescription(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, *, task_id: str, version: str=None, **kwargs) -> None: + def __init__( + self, + *, + task_id: str, + version: Optional[str] = None, + **kwargs + ): super(RepairTaskDeleteDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version -class RepairTaskHistory(Model): +class RepairTaskHistory(msrest.serialization.Model): """A record of the times when the repair task entered each state. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. - - :param created_utc_timestamp: The time when the repair task entered the - Created state. - :type created_utc_timestamp: datetime - :param claimed_utc_timestamp: The time when the repair task entered the - Claimed state. - :type claimed_utc_timestamp: datetime - :param preparing_utc_timestamp: The time when the repair task entered the - Preparing state. 
- :type preparing_utc_timestamp: datetime - :param approved_utc_timestamp: The time when the repair task entered the - Approved state - :type approved_utc_timestamp: datetime - :param executing_utc_timestamp: The time when the repair task entered the - Executing state - :type executing_utc_timestamp: datetime - :param restoring_utc_timestamp: The time when the repair task entered the - Restoring state - :type restoring_utc_timestamp: datetime - :param completed_utc_timestamp: The time when the repair task entered the - Completed state - :type completed_utc_timestamp: datetime - :param preparing_health_check_start_utc_timestamp: The time when the - repair task started the health check in the Preparing state. - :type preparing_health_check_start_utc_timestamp: datetime - :param preparing_health_check_end_utc_timestamp: The time when the repair - task completed the health check in the Preparing state. - :type preparing_health_check_end_utc_timestamp: datetime - :param restoring_health_check_start_utc_timestamp: The time when the - repair task started the health check in the Restoring state. - :type restoring_health_check_start_utc_timestamp: datetime - :param restoring_health_check_end_utc_timestamp: The time when the repair - task completed the health check in the Restoring state. - :type restoring_health_check_end_utc_timestamp: datetime + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. + + :param created_utc_timestamp: The time when the repair task entered the Created state. + :type created_utc_timestamp: ~datetime.datetime + :param claimed_utc_timestamp: The time when the repair task entered the Claimed state. + :type claimed_utc_timestamp: ~datetime.datetime + :param preparing_utc_timestamp: The time when the repair task entered the Preparing state. + :type preparing_utc_timestamp: ~datetime.datetime + :param approved_utc_timestamp: The time when the repair task entered the Approved state. 
+ :type approved_utc_timestamp: ~datetime.datetime + :param executing_utc_timestamp: The time when the repair task entered the Executing state. + :type executing_utc_timestamp: ~datetime.datetime + :param restoring_utc_timestamp: The time when the repair task entered the Restoring state. + :type restoring_utc_timestamp: ~datetime.datetime + :param completed_utc_timestamp: The time when the repair task entered the Completed state. + :type completed_utc_timestamp: ~datetime.datetime + :param preparing_health_check_start_utc_timestamp: The time when the repair task started the + health check in the Preparing state. + :type preparing_health_check_start_utc_timestamp: ~datetime.datetime + :param preparing_health_check_end_utc_timestamp: The time when the repair task completed the + health check in the Preparing state. + :type preparing_health_check_end_utc_timestamp: ~datetime.datetime + :param restoring_health_check_start_utc_timestamp: The time when the repair task started the + health check in the Restoring state. + :type restoring_health_check_start_utc_timestamp: ~datetime.datetime + :param restoring_health_check_end_utc_timestamp: The time when the repair task completed the + health check in the Restoring state. 
+ :type restoring_health_check_end_utc_timestamp: ~datetime.datetime """ _attribute_map = { @@ -16590,7 +20287,22 @@ class RepairTaskHistory(Model): 'restoring_health_check_end_utc_timestamp': {'key': 'RestoringHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, created_utc_timestamp=None, claimed_utc_timestamp=None, preparing_utc_timestamp=None, approved_utc_timestamp=None, executing_utc_timestamp=None, restoring_utc_timestamp=None, completed_utc_timestamp=None, preparing_health_check_start_utc_timestamp=None, preparing_health_check_end_utc_timestamp=None, restoring_health_check_start_utc_timestamp=None, restoring_health_check_end_utc_timestamp=None, **kwargs) -> None: + def __init__( + self, + *, + created_utc_timestamp: Optional[datetime.datetime] = None, + claimed_utc_timestamp: Optional[datetime.datetime] = None, + preparing_utc_timestamp: Optional[datetime.datetime] = None, + approved_utc_timestamp: Optional[datetime.datetime] = None, + executing_utc_timestamp: Optional[datetime.datetime] = None, + restoring_utc_timestamp: Optional[datetime.datetime] = None, + completed_utc_timestamp: Optional[datetime.datetime] = None, + preparing_health_check_start_utc_timestamp: Optional[datetime.datetime] = None, + preparing_health_check_end_utc_timestamp: Optional[datetime.datetime] = None, + restoring_health_check_start_utc_timestamp: Optional[datetime.datetime] = None, + restoring_health_check_end_utc_timestamp: Optional[datetime.datetime] = None, + **kwargs + ): super(RepairTaskHistory, self).__init__(**kwargs) self.created_utc_timestamp = created_utc_timestamp self.claimed_utc_timestamp = claimed_utc_timestamp @@ -16605,29 +20317,26 @@ def __init__(self, *, created_utc_timestamp=None, claimed_utc_timestamp=None, pr self.restoring_health_check_end_utc_timestamp = restoring_health_check_end_utc_timestamp -class RepairTaskUpdateHealthPolicyDescription(Model): +class RepairTaskUpdateHealthPolicyDescription(msrest.serialization.Model): """Describes 
a request to update the health policy of a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task to be updated. :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current value of the repair task. If zero, then no version check is - performed. + :param version: The current version number of the repair task. If non-zero, then the request + will only succeed if this value matches the actual current value of the repair task. If zero, + then no version check is performed. :type version: str - :param perform_preparing_health_check: A boolean indicating if health - check is to be performed in the Preparing stage of the repair task. If not - specified the existing value should not be altered. Otherwise, specify the - desired new value. + :param perform_preparing_health_check: A boolean indicating if health check is to be performed + in the Preparing stage of the repair task. If not specified the existing value should not be + altered. Otherwise, specify the desired new value. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A boolean indicating if health - check is to be performed in the Restoring stage of the repair task. If not - specified the existing value should not be altered. Otherwise, specify the - desired new value. + :param perform_restoring_health_check: A boolean indicating if health check is to be performed + in the Restoring stage of the repair task. If not specified the existing value should not be + altered. Otherwise, specify the desired new value. 
:type perform_restoring_health_check: bool """ @@ -16642,7 +20351,15 @@ class RepairTaskUpdateHealthPolicyDescription(Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__(self, *, task_id: str, version: str=None, perform_preparing_health_check: bool=None, perform_restoring_health_check: bool=None, **kwargs) -> None: + def __init__( + self, + *, + task_id: str, + version: Optional[str] = None, + perform_preparing_health_check: Optional[bool] = None, + perform_restoring_health_check: Optional[bool] = None, + **kwargs + ): super(RepairTaskUpdateHealthPolicyDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version @@ -16650,10 +20367,10 @@ def __init__(self, *, task_id: str, version: str=None, perform_preparing_health_ self.perform_restoring_health_check = perform_restoring_health_check -class RepairTaskUpdateInfo(Model): +class RepairTaskUpdateInfo(msrest.serialization.Model): """Describes the result of an operation that created or updated a repair task. - This type supports the Service Fabric platform; it is not meant to be used - directly from your code. + +This type supports the Service Fabric platform; it is not meant to be used directly from your code. All required parameters must be populated in order to send to Azure. @@ -16669,44 +20386,45 @@ class RepairTaskUpdateInfo(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, *, version: str, **kwargs) -> None: + def __init__( + self, + *, + version: str, + **kwargs + ): super(RepairTaskUpdateInfo, self).__init__(**kwargs) self.version = version class ReplicaHealth(EntityHealth): - """Represents a base class for stateful service replica or stateless service - instance health. - Contains the replica aggregated health state, the health events and the - unhealthy evaluations. + """Represents a base class for stateful service replica or stateless service instance health. 
+Contains the replica aggregated health state, the health events and the unhealthy evaluations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaHealth, - StatelessServiceInstanceHealth + sub-classes are: StatefulServiceReplicaHealth, StatelessServiceInstanceHealth. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str """ _validation = { @@ -16718,58 +20436,66 @@ class ReplicaHealth(EntityHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealth', 'Stateless': 'StatelessServiceInstanceHealth'} } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + partition_id: Optional[str] = None, + **kwargs + ): super(ReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, 
health_statistics=health_statistics, **kwargs) + self.service_kind = 'ReplicaHealth' # type: str self.partition_id = partition_id - self.service_kind = None - self.service_kind = 'ReplicaHealth' class ReplicaHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a replica, containing information about - the data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for a replica, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param partition_id: Id of the partition to which the replica belongs. :type partition_id: str - :param replica_or_instance_id: Id of a stateful service replica or a - stateless service instance. This ID is used in the queries that apply to - both stateful and stateless services. It is used by Service Fabric to - uniquely identify a replica of a partition of a stateful service or an - instance of a stateless service partition. It is unique within a partition - and does not change for the lifetime of the replica or the instance. If a - stateful replica gets dropped and another replica gets created on the same - node for the same partition, it will get a different value for the ID. If - a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a stateless service + instance. This ID is used in the queries that apply to both stateful and stateless services. 
It + is used by Service Fabric to uniquely identify a replica of a partition of a stateful service + or an instance of a stateless service partition. It is unique within a partition and does not + change for the lifetime of the replica or the instance. If a stateful replica gets dropped and + another replica gets created on the same node for the same partition, it will get a different + value for the ID. If a stateless instance is failed over on the same or different node it will get a different value for the ID. :type replica_or_instance_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the replica. The types of the - unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the replica. The types of the unhealthy evaluations can be + EventHealthEvaluation. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -16777,43 +20503,48 @@ class ReplicaHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, partition_id: str=None, replica_or_instance_id: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + partition_id: Optional[str] = None, + replica_or_instance_id: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ReplicaHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Replica' # type: str self.partition_id = partition_id self.replica_or_instance_id = replica_or_instance_id self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Replica' class ReplicaHealthState(EntityHealthState): - """Represents a base class for stateful service replica or stateless service - instance health state. + """Represents a base class for stateful service replica or stateless service instance health state. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaHealthState, - StatelessServiceInstanceHealthState + sub-classes are: StatefulServiceReplicaHealthState, StatelessServiceInstanceHealthState. 
All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: The ID of the partition to which this replica - belongs. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str """ _validation = { @@ -16822,40 +20553,41 @@ class ReplicaHealthState(EntityHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'} } - def __init__(self, *, aggregated_health_state=None, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + partition_id: Optional[str] = None, + **kwargs + ): super(ReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.service_kind = 'ReplicaHealthState' # type: str self.partition_id = partition_id - self.service_kind = None - self.service_kind = 'ReplicaHealthState' class ReplicaHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a stateful service replica or a - stateless service instance. - The replica health state contains the replica ID and its aggregated health - state. - - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + """Represents the health state chunk of a stateful service replica or a stateless service instance. +The replica health state contains the replica ID and its aggregated health state. + + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
:type health_state: str or ~azure.servicefabric.models.HealthState - :param replica_or_instance_id: Id of a stateful service replica or a - stateless service instance. This ID is used in the queries that apply to - both stateful and stateless services. It is used by Service Fabric to - uniquely identify a replica of a partition of a stateful service or an - instance of a stateless service partition. It is unique within a partition - and does not change for the lifetime of the replica or the instance. If a - stateful replica gets dropped and another replica gets created on the same - node for the same partition, it will get a different value for the ID. If - a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a stateless service + instance. This ID is used in the queries that apply to both stateful and stateless services. It + is used by Service Fabric to uniquely identify a replica of a partition of a stateful service + or an instance of a stateless service partition. It is unique within a partition and does not + change for the lifetime of the replica or the instance. If a stateful replica gets dropped and + another replica gets created on the same node for the same partition, it will get a different + value for the ID. If a stateless instance is failed over on the same or different node it will get a different value for the ID. 
:type replica_or_instance_id: str """ @@ -16865,17 +20597,22 @@ class ReplicaHealthStateChunk(EntityHealthStateChunk): 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, } - def __init__(self, *, health_state=None, replica_or_instance_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + replica_or_instance_id: Optional[str] = None, + **kwargs + ): super(ReplicaHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.replica_or_instance_id = replica_or_instance_id -class ReplicaHealthStateChunkList(Model): - """The list of replica health state chunks that respect the input filters in - the chunk query. Returned by get cluster health state chunks query. +class ReplicaHealthStateChunkList(msrest.serialization.Model): + """The list of replica health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param items: The list of replica health state chunks that respect the - input filters in the chunk query. + :param items: The list of replica health state chunks that respect the input filters in the + chunk query. :type items: list[~azure.servicefabric.models.ReplicaHealthStateChunk] """ @@ -16883,56 +20620,51 @@ class ReplicaHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[ReplicaHealthStateChunk]'}, } - def __init__(self, *, items=None, **kwargs) -> None: + def __init__( + self, + *, + items: Optional[List["ReplicaHealthStateChunk"]] = None, + **kwargs + ): super(ReplicaHealthStateChunkList, self).__init__(**kwargs) self.items = items -class ReplicaHealthStateFilter(Model): - """Defines matching criteria to determine whether a replica should be included - as a child of a partition in the cluster health chunk. - The replicas are only returned if the parent entities match a filter - specified in the cluster health chunk query description. 
The parent - partition, service and application must be included in the cluster health - chunk. - One filter can match zero, one or multiple replicas, depending on its - properties. - - :param replica_or_instance_id_filter: Id of the stateful service replica - or stateless service instance that matches the filter. The filter is - applied only to the specified replica, if it exists. - If the replica doesn't exist, no replica is returned in the cluster health - chunk based on this filter. - If the replica exists, it is included in the cluster health chunk if it - respects the other filter properties. - If not specified, all replicas that match the parent filters (if any) are - taken into consideration and matched against the other filter members, - like health state filter. +class ReplicaHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a replica should be included as a child of a partition in the cluster health chunk. +The replicas are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent partition, service and application must be included in the cluster health chunk. +One filter can match zero, one or multiple replicas, depending on its properties. + + :param replica_or_instance_id_filter: Id of the stateful service replica or stateless service + instance that matches the filter. The filter is applied only to the specified replica, if it + exists. + If the replica doesn't exist, no replica is returned in the cluster health chunk based on this + filter. + If the replica exists, it is included in the cluster health chunk if it respects the other + filter properties. + If not specified, all replicas that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. 
:type replica_or_instance_id_filter: str - :param health_state_filter: The filter for the health state of the - replicas. It allows selecting replicas if they match the desired health - states. - The possible values are integer value of one of the following health - states. Only replicas that match the filter are returned. All replicas are - used to evaluate the parent partition aggregated health state. - If not specified, default value is None, unless the replica ID is - specified. If the filter has default value and replica ID is specified, - the matching replica is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches replicas with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the replicas. It allows + selecting replicas if they match the desired health states. + The possible values are integer value of one of the following health states. Only replicas + that match the filter are returned. All replicas are used to evaluate the parent partition + aggregated health state. + If not specified, default value is None, unless the replica ID is specified. If the filter has + default value and replica ID is specified, the matching replica is returned. 
+ The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches replicas with HealthState value of OK (2) + and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int """ @@ -16941,38 +20673,42 @@ class ReplicaHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, *, replica_or_instance_id_filter: str=None, health_state_filter: int=0, **kwargs) -> None: + def __init__( + self, + *, + replica_or_instance_id_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + **kwargs + ): super(ReplicaHealthStateFilter, self).__init__(**kwargs) self.replica_or_instance_id_filter = replica_or_instance_id_filter self.health_state_filter = health_state_filter -class ReplicaInfo(Model): - """Information about the identity, status, health, node name, uptime, and - other details about the replica. +class ReplicaInfo(msrest.serialization.Model): + """Information about the identity, status, health, node name, uptime, and other details about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo + sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo. All required parameters must be populated in order to send to Azure. 
- :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of - the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str """ _validation = { @@ -16980,38 +20716,73 @@ class ReplicaInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'} } - def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, **kwargs) -> None: + def __init__( + self, + *, + replica_status: Optional[Union[str, "ReplicaStatus"]] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + address: Optional[str] = None, + last_in_build_duration_in_seconds: Optional[str] = None, + **kwargs + ): super(ReplicaInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.replica_status = replica_status self.health_state = health_state self.node_name = node_name self.address = address self.last_in_build_duration_in_seconds = last_in_build_duration_in_seconds - self.service_kind = None -class ReplicaMetricLoadDescription(Model): - """Specifies metric loads of a partition's specific secondary replica or - instance. +class ReplicaLifecycleDescription(msrest.serialization.Model): + """Describes how the replica will behave. + + :param is_singleton_replica_move_allowed_during_upgrade: If set to true, replicas with a target + replica set size of 1 will be permitted to move during upgrade. 
+ :type is_singleton_replica_move_allowed_during_upgrade: bool + :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original + location after upgrade. + :type restore_replica_location_after_upgrade: bool + """ + + _attribute_map = { + 'is_singleton_replica_move_allowed_during_upgrade': {'key': 'IsSingletonReplicaMoveAllowedDuringUpgrade', 'type': 'bool'}, + 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, + } + + def __init__( + self, + *, + is_singleton_replica_move_allowed_during_upgrade: Optional[bool] = None, + restore_replica_location_after_upgrade: Optional[bool] = None, + **kwargs + ): + super(ReplicaLifecycleDescription, self).__init__(**kwargs) + self.is_singleton_replica_move_allowed_during_upgrade = is_singleton_replica_move_allowed_during_upgrade + self.restore_replica_location_after_upgrade = restore_replica_location_after_upgrade + + +class ReplicaMetricLoadDescription(msrest.serialization.Model): + """Specifies metric loads of a partition's specific secondary replica or instance. :param node_name: Node name of a specific secondary replica or instance. :type node_name: str - :param replica_or_instance_load_entries: Loads of a different metrics for - a partition's secondary replica or instance. - :type replica_or_instance_load_entries: - list[~azure.servicefabric.models.MetricLoadDescription] + :param replica_or_instance_load_entries: Loads of a different metrics for a partition's + secondary replica or instance. 
+ :type replica_or_instance_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] """ _attribute_map = { @@ -17019,43 +20790,48 @@ class ReplicaMetricLoadDescription(Model): 'replica_or_instance_load_entries': {'key': 'ReplicaOrInstanceLoadEntries', 'type': '[MetricLoadDescription]'}, } - def __init__(self, *, node_name: str=None, replica_or_instance_load_entries=None, **kwargs) -> None: + def __init__( + self, + *, + node_name: Optional[str] = None, + replica_or_instance_load_entries: Optional[List["MetricLoadDescription"]] = None, + **kwargs + ): super(ReplicaMetricLoadDescription, self).__init__(**kwargs) self.node_name = node_name self.replica_or_instance_load_entries = replica_or_instance_load_entries class ReplicasHealthEvaluation(HealthEvaluation): - """Represents health evaluation for replicas, containing health evaluations - for each unhealthy replica that impacted current aggregated health state. - Can be returned when evaluating partition health and the aggregated health - state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for replicas, containing health evaluations for each unhealthy replica that impacted current aggregated health state. Can be returned when evaluating partition health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param max_percent_unhealthy_replicas_per_partition: Maximum allowed - percentage of unhealthy replicas per partition from the - ApplicationHealthPolicy. + :param max_percent_unhealthy_replicas_per_partition: Maximum allowed percentage of unhealthy + replicas per partition from the ApplicationHealthPolicy. :type max_percent_unhealthy_replicas_per_partition: int - :param total_count: Total number of replicas in the partition from the - health store. + :param total_count: Total number of replicas in the partition from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. 
Includes all the unhealthy - ReplicaHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy ReplicaHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -17063,58 +20839,60 @@ class ReplicasHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_replicas_per_partition: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + max_percent_unhealthy_replicas_per_partition: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ReplicasHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Replicas' # type: str self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Replicas' -class 
ReplicatorQueueStatus(Model): - """Provides various statistics of the queue used in the service fabric - replicator. - Contains information about the service fabric replicator like the - replication/copy queue utilization, last acknowledgement received - timestamp, etc. - Depending on the role of the replicator, the properties in this type imply - different meanings. +class ReplicatorQueueStatus(msrest.serialization.Model): + """Provides various statistics of the queue used in the service fabric replicator. +Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. +Depending on the role of the replicator, the properties in this type imply different meanings. - :param queue_utilization_percentage: Represents the utilization of the - queue. A value of 0 indicates that the queue is empty and a value of 100 - indicates the queue is full. + :param queue_utilization_percentage: Represents the utilization of the queue. A value of 0 + indicates that the queue is empty and a value of 100 indicates the queue is full. :type queue_utilization_percentage: int - :param queue_memory_size: Represents the virtual memory consumed by the - queue in bytes. + :param queue_memory_size: Represents the virtual memory consumed by the queue in bytes. :type queue_memory_size: str - :param first_sequence_number: On a primary replicator, this is - semantically the sequence number of the operation for which all the - secondary replicas have sent an acknowledgement. - On a secondary replicator, this is the smallest sequence number of the - operation that is present in the queue. + :param first_sequence_number: On a primary replicator, this is semantically the sequence number + of the operation for which all the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is the smallest sequence number of the operation that is + present in the queue. 
:type first_sequence_number: str - :param completed_sequence_number: On a primary replicator, this is - semantically the highest sequence number of the operation for which all - the secondary replicas have sent an acknowledgement. - On a secondary replicator, this is semantically the highest sequence - number that has been applied to the persistent state. + :param completed_sequence_number: On a primary replicator, this is semantically the highest + sequence number of the operation for which all the secondary replicas have sent an + acknowledgement. + On a secondary replicator, this is semantically the highest sequence number that has been + applied to the persistent state. :type completed_sequence_number: str - :param committed_sequence_number: On a primary replicator, this is - semantically the highest sequence number of the operation for which a - write quorum of the secondary replicas have sent an acknowledgement. - On a secondary replicator, this is semantically the highest sequence - number of the in-order operation received from the primary. + :param committed_sequence_number: On a primary replicator, this is semantically the highest + sequence number of the operation for which a write quorum of the secondary replicas have sent + an acknowledgement. + On a secondary replicator, this is semantically the highest sequence number of the in-order + operation received from the primary. :type committed_sequence_number: str - :param last_sequence_number: Represents the latest sequence number of the - operation that is available in the queue. + :param last_sequence_number: Represents the latest sequence number of the operation that is + available in the queue. 
:type last_sequence_number: str """ @@ -17127,7 +20905,17 @@ class ReplicatorQueueStatus(Model): 'last_sequence_number': {'key': 'LastSequenceNumber', 'type': 'str'}, } - def __init__(self, *, queue_utilization_percentage: int=None, queue_memory_size: str=None, first_sequence_number: str=None, completed_sequence_number: str=None, committed_sequence_number: str=None, last_sequence_number: str=None, **kwargs) -> None: + def __init__( + self, + *, + queue_utilization_percentage: Optional[int] = None, + queue_memory_size: Optional[str] = None, + first_sequence_number: Optional[str] = None, + completed_sequence_number: Optional[str] = None, + committed_sequence_number: Optional[str] = None, + last_sequence_number: Optional[str] = None, + **kwargs + ): super(ReplicatorQueueStatus, self).__init__(**kwargs) self.queue_utilization_percentage = queue_utilization_percentage self.queue_memory_size = queue_memory_size @@ -17137,16 +20925,14 @@ def __init__(self, *, queue_utilization_percentage: int=None, queue_memory_size: self.last_sequence_number = last_sequence_number -class ResolvedServiceEndpoint(Model): +class ResolvedServiceEndpoint(msrest.serialization.Model): """Endpoint of a resolved service partition. - :param kind: The role of the replica where the endpoint is reported. - Possible values include: 'Invalid', 'Stateless', 'StatefulPrimary', - 'StatefulSecondary' + :param kind: The role of the replica where the endpoint is reported. Possible values include: + "Invalid", "Stateless", "StatefulPrimary", "StatefulSecondary". :type kind: str or ~azure.servicefabric.models.ServiceEndpointRole - :param address: The address of the endpoint. If the endpoint has multiple - listeners the address is a JSON object with one property per listener with - the value as the address of that listener. + :param address: The address of the endpoint. 
If the endpoint has multiple listeners the address + is a JSON object with one property per listener with the value as the address of that listener. :type address: str """ @@ -17155,30 +20941,32 @@ class ResolvedServiceEndpoint(Model): 'address': {'key': 'Address', 'type': 'str'}, } - def __init__(self, *, kind=None, address: str=None, **kwargs) -> None: + def __init__( + self, + *, + kind: Optional[Union[str, "ServiceEndpointRole"]] = None, + address: Optional[str] = None, + **kwargs + ): super(ResolvedServiceEndpoint, self).__init__(**kwargs) self.kind = kind self.address = address -class ResolvedServicePartition(Model): +class ResolvedServicePartition(msrest.serialization.Model): """Information about a service partition and its associated endpoints. All required parameters must be populated in order to send to Azure. - :param name: Required. The full name of the service with 'fabric:' URI - scheme. + :param name: Required. The full name of the service with 'fabric:' URI scheme. :type name: str - :param partition_information: Required. A representation of the resolved - partition. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param endpoints: Required. List of resolved service endpoints of a - service partition. + :param partition_information: Required. A representation of the resolved partition. + :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param endpoints: Required. List of resolved service endpoints of a service partition. :type endpoints: list[~azure.servicefabric.models.ResolvedServiceEndpoint] - :param version: Required. The version of this resolved service partition - result. This version should be passed in the next time the ResolveService - call is made via the PreviousRspVersion query parameter. + :param version: Required. The version of this resolved service partition result. 
This version + should be passed in the next time the ResolveService call is made via the PreviousRspVersion + query parameter. :type version: str """ @@ -17196,7 +20984,15 @@ class ResolvedServicePartition(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, *, name: str, partition_information, endpoints, version: str, **kwargs) -> None: + def __init__( + self, + *, + name: str, + partition_information: "PartitionInformation", + endpoints: List["ResolvedServiceEndpoint"], + version: str, + **kwargs + ): super(ResolvedServicePartition, self).__init__(**kwargs) self.name = name self.partition_information = partition_information @@ -17204,15 +21000,12 @@ def __init__(self, *, name: str, partition_information, endpoints, version: str, self.version = version -class ResourceLimits(Model): - """This type describes the resource limits for a given container. It describes - the most amount of resources a container is allowed to use before being - restarted. +class ResourceLimits(msrest.serialization.Model): + """This type describes the resource limits for a given container. It describes the most amount of resources a container is allowed to use before being restarted. :param memory_in_gb: The memory limit in GB. :type memory_in_gb: float - :param cpu: CPU limits in cores. At present, only full cores are - supported. + :param cpu: CPU limits in cores. At present, only full cores are supported. :type cpu: float """ @@ -17221,26 +21014,26 @@ class ResourceLimits(Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__(self, *, memory_in_gb: float=None, cpu: float=None, **kwargs) -> None: + def __init__( + self, + *, + memory_in_gb: Optional[float] = None, + cpu: Optional[float] = None, + **kwargs + ): super(ResourceLimits, self).__init__(**kwargs) self.memory_in_gb = memory_in_gb self.cpu = cpu -class ResourceRequests(Model): - """This type describes the requested resources for a given container. 
It - describes the least amount of resources required for the container. A - container can consume more than requested resources up to the specified - limits before being restarted. Currently, the requested resources are - treated as limits. +class ResourceRequests(msrest.serialization.Model): + """This type describes the requested resources for a given container. It describes the least amount of resources required for the container. A container can consume more than requested resources up to the specified limits before being restarted. Currently, the requested resources are treated as limits. All required parameters must be populated in order to send to Azure. - :param memory_in_gb: Required. The memory request in GB for this - container. + :param memory_in_gb: Required. The memory request in GB for this container. :type memory_in_gb: float - :param cpu: Required. Requested number of CPU cores. At present, only full - cores are supported. + :param cpu: Required. Requested number of CPU cores. At present, only full cores are supported. :type cpu: float """ @@ -17254,22 +21047,26 @@ class ResourceRequests(Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__(self, *, memory_in_gb: float, cpu: float, **kwargs) -> None: + def __init__( + self, + *, + memory_in_gb: float, + cpu: float, + **kwargs + ): super(ResourceRequests, self).__init__(**kwargs) self.memory_in_gb = memory_in_gb self.cpu = cpu -class ResourceRequirements(Model): +class ResourceRequirements(msrest.serialization.Model): """This type describes the resource requirements for a container or a service. All required parameters must be populated in order to send to Azure. - :param requests: Required. Describes the requested resources for a given - container. + :param requests: Required. Describes the requested resources for a given container. :type requests: ~azure.servicefabric.models.ResourceRequests - :param limits: Describes the maximum limits on the resources for a given - container. 
+ :param limits: Describes the maximum limits on the resources for a given container. :type limits: ~azure.servicefabric.models.ResourceLimits """ @@ -17282,41 +21079,45 @@ class ResourceRequirements(Model): 'limits': {'key': 'limits', 'type': 'ResourceLimits'}, } - def __init__(self, *, requests, limits=None, **kwargs) -> None: + def __init__( + self, + *, + requests: "ResourceRequests", + limits: Optional["ResourceLimits"] = None, + **kwargs + ): super(ResourceRequirements, self).__init__(**kwargs) self.requests = requests self.limits = limits -class RestartDeployedCodePackageDescription(Model): - """Defines description for restarting a deployed code package on Service - Fabric node. +class RestartDeployedCodePackageDescription(msrest.serialization.Model): + """Defines description for restarting a deployed code package on Service Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest that - specified this code package. + :param service_manifest_name: Required. The name of service manifest that specified this code + package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed - service package. If ServicePackageActivationMode specified at the time of - creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults - to 'SharedProcess'), then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed service package. If + ServicePackageActivationMode specified at the time of creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), + then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param code_package_name: Required. The name of the code package defined - in the service manifest. + :param code_package_name: Required. 
The name of the code package defined in the service + manifest. :type code_package_name: str - :param code_package_instance_id: Required. The instance ID for currently - running entry point. For a code package setup entry point (if specified) - runs first and after it finishes main entry point is started. - Each time entry point executable is run, its instance ID will change. If 0 - is passed in as the code package instance ID, the API will restart the - code package with whatever instance ID it is currently running. - If an instance ID other than 0 is passed in, the API will restart the code - package only if the current Instance ID matches the passed in instance ID. - Note, passing in the exact instance ID (not 0) in the API is safer, - because if ensures at most one restart of the code package. + :param code_package_instance_id: Required. The instance ID for currently running entry point. + For a code package setup entry point (if specified) runs first and after it finishes main entry + point is started. + Each time entry point executable is run, its instance ID will change. If 0 is passed in as the + code package instance ID, the API will restart the code package with whatever instance ID it is + currently running. + If an instance ID other than 0 is passed in, the API will restart the code package only if the + current Instance ID matches the passed in instance ID. + Note, passing in the exact instance ID (not 0) in the API is safer, because if ensures at most + one restart of the code package. 
:type code_package_instance_id: str """ @@ -17333,7 +21134,15 @@ class RestartDeployedCodePackageDescription(Model): 'code_package_instance_id': {'key': 'CodePackageInstanceId', 'type': 'str'}, } - def __init__(self, *, service_manifest_name: str, code_package_name: str, code_package_instance_id: str, service_package_activation_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_manifest_name: str, + code_package_name: str, + code_package_instance_id: str, + service_package_activation_id: Optional[str] = None, + **kwargs + ): super(RestartDeployedCodePackageDescription, self).__init__(**kwargs) self.service_manifest_name = service_manifest_name self.service_package_activation_id = service_package_activation_id @@ -17341,22 +21150,19 @@ def __init__(self, *, service_manifest_name: str, code_package_name: str, code_p self.code_package_instance_id = code_package_instance_id -class RestartNodeDescription(Model): +class RestartNodeDescription(msrest.serialization.Model): """Describes the parameters to restart a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param node_instance_id: Required. The instance ID of the target node. If - instance ID is specified the node is restarted only if it matches with the - current instance of the node. A default value of "0" would match any - instance ID. The instance ID can be obtained using get node query. Default - value: "0" . + :param node_instance_id: Required. The instance ID of the target node. If instance ID is + specified the node is restarted only if it matches with the current instance of the node. A + default value of "0" would match any instance ID. The instance ID can be obtained using get + node query. :type node_instance_id: str - :param create_fabric_dump: Specify True to create a dump of the fabric - node process. This is case-sensitive. Possible values include: 'False', - 'True'. Default value: "False" . 
- :type create_fabric_dump: str or - ~azure.servicefabric.models.CreateFabricDump + :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is + case-sensitive. Possible values include: "False", "True". Default value: "False". + :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump """ _validation = { @@ -17368,21 +21174,26 @@ class RestartNodeDescription(Model): 'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'}, } - def __init__(self, *, node_instance_id: str="0", create_fabric_dump="False", **kwargs) -> None: + def __init__( + self, + *, + node_instance_id: str = "0", + create_fabric_dump: Optional[Union[str, "CreateFabricDump"]] = "False", + **kwargs + ): super(RestartNodeDescription, self).__init__(**kwargs) self.node_instance_id = node_instance_id self.create_fabric_dump = create_fabric_dump -class RestartPartitionResult(Model): - """Represents information about an operation in a terminal state (Completed or - Faulted). +class RestartPartitionResult(msrest.serialization.Model): + """Represents information about an operation in a terminal state (Completed or Faulted). - :param error_code: If OperationState is Completed, this is 0. If - OperationState is Faulted, this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, + this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the - partition that the user-induced operation acted upon. + :param selected_partition: This class returns information about the partition that the + user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -17391,25 +21202,29 @@ class RestartPartitionResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: + def __init__( + self, + *, + error_code: Optional[int] = None, + selected_partition: Optional["SelectedPartition"] = None, + **kwargs + ): super(RestartPartitionResult, self).__init__(**kwargs) self.error_code = error_code self.selected_partition = selected_partition -class RestorePartitionDescription(Model): - """Specifies the parameters needed to trigger a restore of a specific - partition. +class RestorePartitionDescription(msrest.serialization.Model): + """Specifies the parameters needed to trigger a restore of a specific partition. All required parameters must be populated in order to send to Azure. :param backup_id: Required. Unique backup ID. :type backup_id: str - :param backup_location: Required. Location of the backup relative to the - backup storage specified/ configured. + :param backup_location: Required. Location of the backup relative to the backup storage + specified/ configured. :type backup_location: str - :param backup_storage: Location of the backup from where the partition - will be restored. + :param backup_storage: Location of the backup from where the partition will be restored. 
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -17424,29 +21239,33 @@ class RestorePartitionDescription(Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__(self, *, backup_id: str, backup_location: str, backup_storage=None, **kwargs) -> None: + def __init__( + self, + *, + backup_id: str, + backup_location: str, + backup_storage: Optional["BackupStorageDescription"] = None, + **kwargs + ): super(RestorePartitionDescription, self).__init__(**kwargs) self.backup_id = backup_id self.backup_location = backup_location self.backup_storage = backup_storage -class RestoreProgressInfo(Model): +class RestoreProgressInfo(msrest.serialization.Model): """Describes the progress of a restore operation on a partition. - :param restore_state: Represents the current state of the partition - restore operation. Possible values include: 'Invalid', 'Accepted', - 'RestoreInProgress', 'Success', 'Failure', 'Timeout' + :param restore_state: Represents the current state of the partition restore operation. Possible + values include: "Invalid", "Accepted", "RestoreInProgress", "Success", "Failure", "Timeout". :type restore_state: str or ~azure.servicefabric.models.RestoreState :param time_stamp_utc: Timestamp when operation succeeded or failed. - :type time_stamp_utc: datetime - :param restored_epoch: Describes the epoch at which the partition is - restored. + :type time_stamp_utc: ~datetime.datetime + :param restored_epoch: Describes the epoch at which the partition is restored. :type restored_epoch: ~azure.servicefabric.models.Epoch :param restored_lsn: Restored LSN. :type restored_lsn: str - :param failure_error: Denotes the failure encountered in performing - restore operation. + :param failure_error: Denotes the failure encountered in performing restore operation. 
:type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -17458,7 +21277,16 @@ class RestoreProgressInfo(Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__(self, *, restore_state=None, time_stamp_utc=None, restored_epoch=None, restored_lsn: str=None, failure_error=None, **kwargs) -> None: + def __init__( + self, + *, + restore_state: Optional[Union[str, "RestoreState"]] = None, + time_stamp_utc: Optional[datetime.datetime] = None, + restored_epoch: Optional["Epoch"] = None, + restored_lsn: Optional[str] = None, + failure_error: Optional["FabricErrorError"] = None, + **kwargs + ): super(RestoreProgressInfo, self).__init__(**kwargs) self.restore_state = restore_state self.time_stamp_utc = time_stamp_utc @@ -17467,14 +21295,13 @@ def __init__(self, *, restore_state=None, time_stamp_utc=None, restored_epoch=No self.failure_error = failure_error -class ResumeApplicationUpgradeDescription(Model): - """Describes the parameters for resuming an unmonitored manual Service Fabric - application upgrade. +class ResumeApplicationUpgradeDescription(msrest.serialization.Model): + """Describes the parameters for resuming an unmonitored manual Service Fabric application upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain_name: Required. The name of the upgrade domain in - which to resume the upgrade. + :param upgrade_domain_name: Required. The name of the upgrade domain in which to resume the + upgrade. 
:type upgrade_domain_name: str """ @@ -17486,18 +21313,22 @@ class ResumeApplicationUpgradeDescription(Model): 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, } - def __init__(self, *, upgrade_domain_name: str, **kwargs) -> None: + def __init__( + self, + *, + upgrade_domain_name: str, + **kwargs + ): super(ResumeApplicationUpgradeDescription, self).__init__(**kwargs) self.upgrade_domain_name = upgrade_domain_name -class ResumeClusterUpgradeDescription(Model): +class ResumeClusterUpgradeDescription(msrest.serialization.Model): """Describes the parameters for resuming a cluster upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain: Required. The next upgrade domain for this cluster - upgrade. + :param upgrade_domain: Required. The next upgrade domain for this cluster upgrade. :type upgrade_domain: str """ @@ -17509,83 +21340,76 @@ class ResumeClusterUpgradeDescription(Model): 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, } - def __init__(self, *, upgrade_domain: str, **kwargs) -> None: + def __init__( + self, + *, + upgrade_domain: str, + **kwargs + ): super(ResumeClusterUpgradeDescription, self).__init__(**kwargs) self.upgrade_domain = upgrade_domain -class RollingUpgradeUpdateDescription(Model): - """Describes the parameters for updating a rolling upgrade of application or - cluster. +class RollingUpgradeUpdateDescription(msrest.serialization.Model): + """Describes the parameters for updating a rolling upgrade of application or cluster. All required parameters must be populated in order to send to Azure. - :param rolling_upgrade_mode: Required. The mode used to monitor health - during a rolling upgrade. The values are UnmonitoredAuto, - UnmonitoredManual, and Monitored. Possible values include: 'Invalid', - 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: - "UnmonitoredAuto" . + :param rolling_upgrade_mode: Required. 
The mode used to monitor health during a rolling + upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values + include: "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param replica_set_check_timeout_in_milliseconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param replica_set_check_timeout_in_milliseconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type replica_set_check_timeout_in_milliseconds: long - :param failure_action: The compensating action to perform when a Monitored - upgrade encounters monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that - the upgrade will start rolling back automatically. 
- Manual indicates that the upgrade will switch to UnmonitoredManual upgrade - mode. Possible values include: 'Invalid', 'Rollback', 'Manual' + :param failure_action: The compensating action to perform when a Monitored upgrade encounters + monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will + start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible + values include: "Invalid", "Rollback", "Manual". :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to - wait after completing an upgrade domain before applying health policies. - It is first interpreted as a string representing an ISO 8601 duration. If - that fails, then it is interpreted as a number representing the total - number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing + an upgrade domain before applying health policies. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time - that the application or cluster must remain healthy before the upgrade - proceeds to the next upgrade domain. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time that the application or + cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first + interpreted as a string representing an ISO 8601 duration. 
If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to - retry health evaluation when the application or cluster is unhealthy - before FailureAction is executed. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted - as a number representing the total number of milliseconds. + :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health + evaluation when the application or cluster is unhealthy before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that fails, then it is + interpreted as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall - upgrade has to complete before FailureAction is executed. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, - then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete + before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 + duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each - upgrade domain has to complete before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that - fails, then it is interpreted as a number representing the total number of - milliseconds. 
+ :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to + complete before FailureAction is executed. It is first interpreted as a string representing an + ISO 8601 duration. If that fails, then it is interpreted as a number representing the total + number of milliseconds. :type upgrade_domain_timeout_in_milliseconds: str - :param instance_close_delay_duration_in_seconds: Duration in seconds, to - wait before a stateless instance is closed, to allow the active requests - to drain gracefully. This would be effective when the instance is closing - during the application/cluster - upgrade, only for those instances which have a non-zero delay duration - configured in the service description. See - InstanceCloseDelayDurationSeconds property in $ref: + :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a + stateless instance is closed, to allow the active requests to drain gracefully. This would be + effective when the instance is closing during the application/cluster + upgrade, only for those instances which have a non-zero delay duration configured in the + service description. See InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is - 4294967295, which indicates that the behavior will entirely depend on the - delay configured in the stateless service description. + Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates + that the behavior will entirely depend on the delay configured in the stateless service + description. 
:type instance_close_delay_duration_in_seconds: long """ @@ -17606,7 +21430,21 @@ class RollingUpgradeUpdateDescription(Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__(self, *, rolling_upgrade_mode="UnmonitoredAuto", force_restart: bool=None, replica_set_check_timeout_in_milliseconds: int=None, failure_action=None, health_check_wait_duration_in_milliseconds: str=None, health_check_stable_duration_in_milliseconds: str=None, health_check_retry_timeout_in_milliseconds: str=None, upgrade_timeout_in_milliseconds: str=None, upgrade_domain_timeout_in_milliseconds: str=None, instance_close_delay_duration_in_seconds: int=None, **kwargs) -> None: + def __init__( + self, + *, + rolling_upgrade_mode: Union[str, "UpgradeMode"] = "UnmonitoredAuto", + force_restart: Optional[bool] = False, + replica_set_check_timeout_in_milliseconds: Optional[int] = 42949672925, + failure_action: Optional[Union[str, "FailureAction"]] = None, + health_check_wait_duration_in_milliseconds: Optional[str] = "0", + health_check_stable_duration_in_milliseconds: Optional[str] = "PT0H2M0S", + health_check_retry_timeout_in_milliseconds: Optional[str] = "PT0H10M0S", + upgrade_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", + upgrade_domain_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", + instance_close_delay_duration_in_seconds: Optional[int] = 4294967295, + **kwargs + ): super(RollingUpgradeUpdateDescription, self).__init__(**kwargs) self.rolling_upgrade_mode = rolling_upgrade_mode self.force_restart = force_restart @@ -17621,19 +21459,15 @@ def __init__(self, *, rolling_upgrade_mode="UnmonitoredAuto", force_restart: boo class RunToCompletionExecutionPolicy(ExecutionPolicy): - """The run to completion execution policy, the service will perform its - desired operation and complete successfully. 
If the service encounters - failure, it will restarted based on restart policy specified. If the - service completes its operation successfully, it will not be restarted - again. + """The run to completion execution policy, the service will perform its desired operation and complete successfully. If the service encounters failure, it will restarted based on restart policy specified. If the service completes its operation successfully, it will not be restarted again. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param restart: Required. Enumerates the restart policy for - RunToCompletionExecutionPolicy. Possible values include: 'OnFailure', - 'Never' + :param type: Required. Enumerates the execution policy types for services.Constant filled by + server. Possible values include: "Default", "RunToCompletion". + :type type: str or ~azure.servicefabric.models.ExecutionPolicyType + :param restart: Required. Enumerates the restart policy for RunToCompletionExecutionPolicy. + Possible values include: "OnFailure", "Never". :type restart: str or ~azure.servicefabric.models.RestartPolicy """ @@ -17647,20 +21481,23 @@ class RunToCompletionExecutionPolicy(ExecutionPolicy): 'restart': {'key': 'restart', 'type': 'str'}, } - def __init__(self, *, restart, **kwargs) -> None: + def __init__( + self, + *, + restart: Union[str, "RestartPolicy"], + **kwargs + ): super(RunToCompletionExecutionPolicy, self).__init__(**kwargs) + self.type = 'RunToCompletion' # type: str self.restart = restart - self.type = 'RunToCompletion' -class SafetyCheckWrapper(Model): - """A wrapper for the safety check object. Safety checks are performed by - service fabric before continuing with the operations. These checks ensure - the availability of the service and the reliability of the state. +class SafetyCheckWrapper(msrest.serialization.Model): + """A wrapper for the safety check object. 
Safety checks are performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. - :param safety_check: Represents a safety check performed by service fabric - before continuing with the operations. These checks ensure the - availability of the service and the reliability of the state. + :param safety_check: Represents a safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. :type safety_check: ~azure.servicefabric.models.SafetyCheck """ @@ -17668,24 +21505,26 @@ class SafetyCheckWrapper(Model): 'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'}, } - def __init__(self, *, safety_check=None, **kwargs) -> None: + def __init__( + self, + *, + safety_check: Optional["SafetyCheck"] = None, + **kwargs + ): super(SafetyCheckWrapper, self).__init__(**kwargs) self.safety_check = safety_check -class ScalingPolicyDescription(Model): +class ScalingPolicyDescription(msrest.serialization.Model): """Describes how the scaling should be performed. All required parameters must be populated in order to send to Azure. - :param scaling_trigger: Required. Specifies the trigger associated with - this scaling policy - :type scaling_trigger: - ~azure.servicefabric.models.ScalingTriggerDescription - :param scaling_mechanism: Required. Specifies the mechanism associated - with this scaling policy - :type scaling_mechanism: - ~azure.servicefabric.models.ScalingMechanismDescription + :param scaling_trigger: Required. Specifies the trigger associated with this scaling policy. + :type scaling_trigger: ~azure.servicefabric.models.ScalingTriggerDescription + :param scaling_mechanism: Required. Specifies the mechanism associated with this scaling + policy. 
+ :type scaling_mechanism: ~azure.servicefabric.models.ScalingMechanismDescription """ _validation = { @@ -17698,49 +21537,50 @@ class ScalingPolicyDescription(Model): 'scaling_mechanism': {'key': 'ScalingMechanism', 'type': 'ScalingMechanismDescription'}, } - def __init__(self, *, scaling_trigger, scaling_mechanism, **kwargs) -> None: + def __init__( + self, + *, + scaling_trigger: "ScalingTriggerDescription", + scaling_mechanism: "ScalingMechanismDescription", + **kwargs + ): super(ScalingPolicyDescription, self).__init__(**kwargs) self.scaling_trigger = scaling_trigger self.scaling_mechanism = scaling_mechanism class SecondaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is - functioning in a ActiveSecondary role. + """Provides statistics about the Service Fabric Replicator, when it is functioning in a ActiveSecondary role. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecondaryActiveReplicatorStatus, - SecondaryIdleReplicatorStatus - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the secondary replicator. - :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp - (UTC) at which a replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation - message was never received. - :type last_replication_operation_received_time_utc: datetime - :param is_in_build: Value that indicates whether the replica is currently - being built. - :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary + sub-classes are: SecondaryActiveReplicatorStatus, SecondaryIdleReplicatorStatus. 
+ + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the secondary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a + replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation message was never + received. + :type last_replication_operation_received_time_utc: ~datetime.datetime + :param is_in_build: Value that indicates whether the replica is currently being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at - which a copy operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation - message was never received. - :type last_copy_operation_received_time_utc: datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at - which an acknowledgment was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment - message was never sent. - :type last_acknowledgement_sent_time_utc: datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy + operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation message was never + received. 
+ :type last_copy_operation_received_time_utc: ~datetime.datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment + was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. + :type last_acknowledgement_sent_time_utc: ~datetime.datetime """ _validation = { @@ -17761,50 +21601,56 @@ class SecondaryReplicatorStatus(ReplicatorStatus): 'kind': {'ActiveSecondary': 'SecondaryActiveReplicatorStatus', 'IdleSecondary': 'SecondaryIdleReplicatorStatus'} } - def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: + def __init__( + self, + *, + replication_queue_status: Optional["ReplicatorQueueStatus"] = None, + last_replication_operation_received_time_utc: Optional[datetime.datetime] = None, + is_in_build: Optional[bool] = None, + copy_queue_status: Optional["ReplicatorQueueStatus"] = None, + last_copy_operation_received_time_utc: Optional[datetime.datetime] = None, + last_acknowledgement_sent_time_utc: Optional[datetime.datetime] = None, + **kwargs + ): super(SecondaryReplicatorStatus, self).__init__(**kwargs) + self.kind = 'SecondaryReplicatorStatus' # type: str self.replication_queue_status = replication_queue_status self.last_replication_operation_received_time_utc = last_replication_operation_received_time_utc self.is_in_build = is_in_build self.copy_queue_status = copy_queue_status self.last_copy_operation_received_time_utc = last_copy_operation_received_time_utc self.last_acknowledgement_sent_time_utc = last_acknowledgement_sent_time_utc - self.kind = 'SecondaryReplicatorStatus' class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in active mode and is part of - the replica set. 
- - All required parameters must be populated in order to send to Azure. - - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the secondary replicator. - :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp - (UTC) at which a replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation - message was never received. - :type last_replication_operation_received_time_utc: datetime - :param is_in_build: Value that indicates whether the replica is currently - being built. - :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary + """Status of the secondary replicator when it is in active mode and is part of the replica set. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the secondary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a + replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation message was never + received. + :type last_replication_operation_received_time_utc: ~datetime.datetime + :param is_in_build: Value that indicates whether the replica is currently being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary replicator. 
:type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at - which a copy operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation - message was never received. - :type last_copy_operation_received_time_utc: datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at - which an acknowledgment was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment - message was never sent. - :type last_acknowledgement_sent_time_utc: datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy + operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation message was never + received. + :type last_copy_operation_received_time_utc: ~datetime.datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment + was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. 
+ :type last_acknowledgement_sent_time_utc: ~datetime.datetime """ _validation = { @@ -17821,44 +21667,50 @@ class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: + def __init__( + self, + *, + replication_queue_status: Optional["ReplicatorQueueStatus"] = None, + last_replication_operation_received_time_utc: Optional[datetime.datetime] = None, + is_in_build: Optional[bool] = None, + copy_queue_status: Optional["ReplicatorQueueStatus"] = None, + last_copy_operation_received_time_utc: Optional[datetime.datetime] = None, + last_acknowledgement_sent_time_utc: Optional[datetime.datetime] = None, + **kwargs + ): super(SecondaryActiveReplicatorStatus, self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc, **kwargs) - self.kind = 'ActiveSecondary' + self.kind = 'ActiveSecondary' # type: str class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in idle mode and is being - built by the primary. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. Constant filled by server. - :type kind: str - :param replication_queue_status: Details about the replication queue on - the secondary replicator. 
- :type replication_queue_status: - ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp - (UTC) at which a replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation - message was never received. - :type last_replication_operation_received_time_utc: datetime - :param is_in_build: Value that indicates whether the replica is currently - being built. - :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary + """Status of the secondary replicator when it is in idle mode and is being built by the primary. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The role of a replica of a stateful service.Constant filled by server. + Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param replication_queue_status: Details about the replication queue on the secondary replicator. + :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a + replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation message was never + received. + :type last_replication_operation_received_time_utc: ~datetime.datetime + :param is_in_build: Value that indicates whether the replica is currently being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at - which a copy operation was received from the primary. 
- UTC 0 represents an invalid value, indicating that a copy operation - message was never received. - :type last_copy_operation_received_time_utc: datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at - which an acknowledgment was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment - message was never sent. - :type last_acknowledgement_sent_time_utc: datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy + operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation message was never + received. + :type last_copy_operation_received_time_utc: ~datetime.datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment + was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. + :type last_acknowledgement_sent_time_utc: ~datetime.datetime """ _validation = { @@ -17875,18 +21727,27 @@ class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: + def __init__( + self, + *, + replication_queue_status: Optional["ReplicatorQueueStatus"] = None, + last_replication_operation_received_time_utc: Optional[datetime.datetime] = None, + is_in_build: Optional[bool] = None, + copy_queue_status: Optional["ReplicatorQueueStatus"] = None, + last_copy_operation_received_time_utc: Optional[datetime.datetime] = None, + last_acknowledgement_sent_time_utc: Optional[datetime.datetime] = None, + **kwargs + ): super(SecondaryIdleReplicatorStatus, 
self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc, **kwargs) - self.kind = 'IdleSecondary' + self.kind = 'IdleSecondary' # type: str -class SecretResourceDescription(Model): +class SecretResourceDescription(msrest.serialization.Model): """This type describes a secret resource. All required parameters must be populated in order to send to Azure. - :param properties: Required. Describes the properties of a secret - resource. + :param properties: Required. Describes the properties of a secret resource. :type properties: ~azure.servicefabric.models.SecretResourceProperties :param name: Required. Name of the Secret resource. :type name: str @@ -17902,13 +21763,19 @@ class SecretResourceDescription(Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__(self, *, properties, name: str, **kwargs) -> None: + def __init__( + self, + *, + properties: "SecretResourceProperties", + name: str, + **kwargs + ): super(SecretResourceDescription, self).__init__(**kwargs) self.properties = properties self.name = name -class SecretValue(Model): +class SecretValue(msrest.serialization.Model): """This type represents the unencrypted value of the secret. :param value: The actual value of the secret. @@ -17919,12 +21786,17 @@ class SecretValue(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, *, value: str=None, **kwargs) -> None: + def __init__( + self, + *, + value: Optional[str] = None, + **kwargs + ): super(SecretValue, self).__init__(**kwargs) self.value = value -class SecretValueProperties(Model): +class SecretValueProperties(msrest.serialization.Model): """This type describes properties of secret value resource. 
:param value: The actual value of the secret. @@ -17935,14 +21807,18 @@ class SecretValueProperties(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, *, value: str=None, **kwargs) -> None: + def __init__( + self, + *, + value: Optional[str] = None, + **kwargs + ): super(SecretValueProperties, self).__init__(**kwargs) self.value = value -class SecretValueResourceDescription(Model): - """This type describes a value of a secret resource. The name of this resource - is the version identifier corresponding to this secret value. +class SecretValueResourceDescription(msrest.serialization.Model): + """This type describes a value of a secret resource. The name of this resource is the version identifier corresponding to this secret value. All required parameters must be populated in order to send to Azure. @@ -17961,20 +21837,49 @@ class SecretValueResourceDescription(Model): 'value': {'key': 'properties.value', 'type': 'str'}, } - def __init__(self, *, name: str, value: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + value: Optional[str] = None, + **kwargs + ): super(SecretValueResourceDescription, self).__init__(**kwargs) self.name = name self.value = value +class SecretValueResourceProperties(SecretValueProperties): + """This type describes properties of a secret value resource. + + :param value: The actual value of the secret. + :type value: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[str] = None, + **kwargs + ): + super(SecretValueResourceProperties, self).__init__(value=value, **kwargs) + + class SeedNodeSafetyCheck(SafetyCheck): - """Represents a safety check for the seed nodes being performed by service - fabric before continuing with node level operations. + """Represents a safety check for the seed nodes being performed by service fabric before continuing with node level operations. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind """ _validation = { @@ -17985,22 +21890,23 @@ class SeedNodeSafetyCheck(SafetyCheck): 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(SeedNodeSafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsureSeedNodeQuorum' + self.kind = 'EnsureSeedNodeQuorum' # type: str -class SelectedPartition(Model): - """This class returns information about the partition that the user-induced - operation acted upon. +class SelectedPartition(msrest.serialization.Model): + """This class returns information about the partition that the user-induced operation acted upon. :param service_name: The name of the service the partition belongs to. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely - identify a partition. This is a randomly generated GUID when the service - was created. The partition ID is unique and does not change for the - lifetime of the service. If the same service was deleted and recreated the - IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. + This is a randomly generated GUID when the service was created. 
The partition ID is unique and + does not change for the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str """ @@ -18009,33 +21915,36 @@ class SelectedPartition(Model): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, service_name: str=None, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + **kwargs + ): super(SelectedPartition, self).__init__(**kwargs) self.service_name = service_name self.partition_id = partition_id class ServiceBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric service - specifying what backup policy is being applied and suspend description, if - any. + """Backup configuration information for a specific Service Fabric service specifying what backup policy is being applied and suspend description, if any. All required parameters must be populated in order to send to Azure. - :param policy_name: The name of the backup policy which is applicable to - this Service Fabric application or service or partition. + :param kind: Required. The entity type of a Service Fabric entity such as Application, Service + or a Partition where periodic backups can be enabled.Constant filled by server. Possible + values include: "Invalid", "Partition", "Service", "Application". + :type kind: str or ~azure.servicefabric.models.BackupEntityKind + :param policy_name: The name of the backup policy which is applicable to this Service Fabric + application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup - policy is applied. 
Possible values include: 'Invalid', 'Partition', - 'Service', 'Application' - :type policy_inherited_from: str or - ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup policy is applied. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param kind: Required. Constant filled by server. - :type kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str """ @@ -18044,17 +21953,25 @@ class ServiceBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, service_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + policy_name: Optional[str] = None, + policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, + suspension_info: Optional["BackupSuspensionInfo"] = None, + service_name: Optional[str] = None, + **kwargs + ): super(ServiceBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) + self.kind = 'Service' # type: str self.service_name = service_name - self.kind = 'Service' class ServiceBackupEntity(BackupEntity): @@ -18062,10 +21979,11 @@ class 
ServiceBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. Constant filled by server. - :type entity_kind: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, + Service or a Partition where periodic backups can be enabled.Constant filled by server. + Possible values include: "Invalid", "Partition", "Service", "Application". + :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str """ @@ -18078,24 +21996,28 @@ class ServiceBackupEntity(BackupEntity): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, *, service_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + **kwargs + ): super(ServiceBackupEntity, self).__init__(**kwargs) + self.entity_kind = 'Service' # type: str self.service_name = service_name - self.entity_kind = 'Service' -class ServiceCorrelationDescription(Model): +class ServiceCorrelationDescription(msrest.serialization.Model): """Creates a particular correlation between services. All required parameters must be populated in order to send to Azure. - :param scheme: Required. The ServiceCorrelationScheme which describes the - relationship between this service and the service specified via - ServiceName. Possible values include: 'Invalid', 'Affinity', - 'AlignedAffinity', 'NonAlignedAffinity' + :param scheme: Required. The ServiceCorrelationScheme which describes the relationship between + this service and the service specified via ServiceName. Possible values include: "Invalid", + "Affinity", "AlignedAffinity", "NonAlignedAffinity". :type scheme: str or ~azure.servicefabric.models.ServiceCorrelationScheme - :param service_name: Required. 
The name of the service that the - correlation relationship is established with. + :param service_name: Required. The name of the service that the correlation relationship is + established with. :type service_name: str """ @@ -18109,7 +22031,13 @@ class ServiceCorrelationDescription(Model): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, *, scheme, service_name: str, **kwargs) -> None: + def __init__( + self, + *, + scheme: Union[str, "ServiceCorrelationScheme"], + service_name: str, + **kwargs + ): super(ServiceCorrelationDescription, self).__init__(**kwargs) self.scheme = scheme self.service_name = service_name @@ -18119,57 +22047,84 @@ class ServiceEvent(FabricEvent): """Represents the base for all Service Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, - ServiceNewHealthReportEvent, ServiceHealthReportExpiredEvent - - All required parameters must be populated in order to send to Azure. - - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, ServiceHealthReportExpiredEvent, ServiceNewHealthReportEvent. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. 
:type service_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent'} - } - - def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent'} + } + + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + service_id: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ServiceEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ServiceEvent' # type: str self.service_id = service_id - self.kind = 'ServiceEvent' class ServiceCreatedEvent(ServiceEvent): @@ -18177,25 +22132,44 @@ class ServiceCreatedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param service_type_name: Required. Service type name. :type service_type_name: str @@ -18215,18 +22189,17 @@ class ServiceCreatedEvent(ServiceEvent): :type min_replica_set_size: int :param service_package_version: Required. Version of Service package. :type service_package_version: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. 
If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -18241,11 +22214,11 @@ class ServiceCreatedEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -18259,8 +22232,28 @@ class ServiceCreatedEvent(ServiceEvent): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, service_type_name: str, application_name: str, application_type_name: str, service_instance: int, is_stateful: bool, partition_count: int, target_replica_set_size: int, min_replica_set_size: int, service_package_version: str, partition_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + service_id: str, + service_type_name: str, + application_name: 
str, + application_type_name: str, + service_instance: int, + is_stateful: bool, + partition_count: int, + target_replica_set_size: int, + min_replica_set_size: int, + service_package_version: str, + partition_id: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ServiceCreatedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.kind = 'ServiceCreated' # type: str self.service_type_name = service_type_name self.application_name = application_name self.application_type_name = application_type_name @@ -18271,7 +22264,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, servi self.min_replica_set_size = min_replica_set_size self.service_package_version = service_package_version self.partition_id = partition_id - self.kind = 'ServiceCreated' class ServiceDeletedEvent(ServiceEvent): @@ -18279,25 +22271,44 @@ class ServiceDeletedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param service_type_name: Required. Service type name. 
:type service_type_name: str @@ -18320,9 +22331,9 @@ class ServiceDeletedEvent(ServiceEvent): """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -18336,11 +22347,11 @@ class ServiceDeletedEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -18353,8 +22364,27 @@ class ServiceDeletedEvent(ServiceEvent): 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, } - def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, service_type_name: str, application_name: str, application_type_name: str, service_instance: int, is_stateful: bool, partition_count: int, target_replica_set_size: int, min_replica_set_size: int, service_package_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + service_id: str, + service_type_name: str, + application_name: str, + application_type_name: str, + service_instance: int, + is_stateful: bool, + partition_count: int, + target_replica_set_size: int, + min_replica_set_size: int, + service_package_version: str, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ServiceDeletedEvent, 
self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.kind = 'ServiceDeleted' # type: str self.service_type_name = service_type_name self.application_name = application_name self.application_type_name = application_type_name @@ -18364,79 +22394,72 @@ def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, servi self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.service_package_version = service_package_version - self.kind = 'ServiceDeleted' -class ServiceDescription(Model): - """A ServiceDescription contains all of the information necessary to create a - service. +class ServiceDescription(msrest.serialization.Model): + """A ServiceDescription contains all of the information necessary to create a service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceDescription, StatelessServiceDescription + sub-classes are: StatefulServiceDescription, StatelessServiceDescription. All required parameters must be populated in order to send to Azure. - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. 
Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. - Initialization data is passed to service instances or replicas when they - are created. + :param initialization_data: The initialization data as an array of bytes. Initialization data + is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an - object. - :type partition_description: - ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an object. + :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. 
- :type service_load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost - property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param tags_required_to_place: Tags for placement of this service. + :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :param tags_required_to_run: Tags for running of this service. + :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription """ _validation = { + 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, - 'service_kind': {'required': True}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -18451,15 +22474,37 @@ class ServiceDescription(Model): 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, + 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceDescription', 'Stateless': 'StatelessServiceDescription'} } - def __init__(self, *, service_name: str, service_type_name: str, partition_description, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, **kwargs) -> None: + def __init__( + self, 
+ *, + service_name: str, + service_type_name: str, + partition_description: "PartitionSchemeDescription", + application_name: Optional[str] = None, + initialization_data: Optional[List[int]] = None, + placement_constraints: Optional[str] = None, + correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, + service_load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + default_move_cost: Optional[Union[str, "MoveCost"]] = None, + is_default_move_cost_specified: Optional[bool] = None, + service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, + service_dns_name: Optional[str] = None, + scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, + tags_required_to_place: Optional["NodeTagsDescription"] = None, + tags_required_to_run: Optional["NodeTagsDescription"] = None, + **kwargs + ): super(ServiceDescription, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.application_name = application_name self.service_name = service_name self.service_type_name = service_type_name @@ -18474,34 +22519,31 @@ def __init__(self, *, service_name: str, service_type_name: str, partition_descr self.service_package_activation_mode = service_package_activation_mode self.service_dns_name = service_dns_name self.scaling_policies = scaling_policies - self.service_kind = None + self.tags_required_to_place = tags_required_to_place + self.tags_required_to_run = tags_required_to_run -class ServiceFromTemplateDescription(Model): - """Defines description for creating a Service Fabric service from a template - defined in the application manifest. +class ServiceFromTemplateDescription(msrest.serialization.Model): + """Defines description for creating a Service Fabric service from a template defined in the application manifest. All required parameters must be populated in order to send to Azure. 
- :param application_name: Required. The name of the application, including - the 'fabric:' URI scheme. + :param application_name: Required. The name of the application, including the 'fabric:' URI + scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data for the newly created - service instance. + :param initialization_data: The initialization data for the newly created service instance. :type initialization_data: list[int] - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. 
:type service_dns_name: str """ @@ -18520,7 +22562,17 @@ class ServiceFromTemplateDescription(Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, } - def __init__(self, *, application_name: str, service_name: str, service_type_name: str, initialization_data=None, service_package_activation_mode=None, service_dns_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + application_name: str, + service_name: str, + service_type_name: str, + initialization_data: Optional[List[int]] = None, + service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, + service_dns_name: Optional[str] = None, + **kwargs + ): super(ServiceFromTemplateDescription, self).__init__(**kwargs) self.application_name = application_name self.service_name = service_name @@ -18533,30 +22585,26 @@ def __init__(self, *, application_name: str, service_name: str, service_type_nam class ServiceHealth(EntityHealth): """Information about the health of a Service Fabric service. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the service whose health information is described - by this object. + :param name: The name of the service whose health information is described by this object. :type name: str - :param partition_health_states: The list of partition health states - associated with the service. - :type partition_health_states: - list[~azure.servicefabric.models.PartitionHealthState] + :param partition_health_states: The list of partition health states associated with the + service. 
+ :type partition_health_states: list[~azure.servicefabric.models.PartitionHealthState] """ _attribute_map = { @@ -18568,40 +22616,50 @@ class ServiceHealth(EntityHealth): 'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, partition_health_states=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + name: Optional[str] = None, + partition_health_states: Optional[List["PartitionHealthState"]] = None, + **kwargs + ): super(ServiceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name self.partition_health_states = partition_health_states class ServiceHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a service, containing information about - the data and the algorithm used by health store to evaluate health. The - evaluation is returned only when the aggregated health state is either - Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. 
+ """Represents health evaluation for a service, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param service_name: Name of the service whose health evaluation is - described by this object. + :param service_name: Name of the service whose health evaluation is described by this object. :type service_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the service. 
The types of the - unhealthy evaluations can be PartitionsHealthEvaluation or - EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the service. The types of the unhealthy evaluations can be + PartitionsHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -18609,18 +22667,26 @@ class ServiceHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, service_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + service_name: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ServiceHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Service' # type: str self.service_name = service_name self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Service' class ServiceHealthReportExpiredEvent(ServiceEvent): @@ -18628,25 +22694,44 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type service_id: str :param instance_id: Required. Id of Service instance. :type instance_id: long @@ -18662,17 +22747,16 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -18686,11 +22770,11 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -18703,8 +22787,27 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + service_id: str, + instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ServiceHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, 
has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.kind = 'ServiceHealthReportExpired' # type: str self.instance_id = instance_id self.source_id = source_id self.property = property @@ -18714,21 +22817,16 @@ def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, insta self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'ServiceHealthReportExpired' class ServiceHealthState(EntityHealthState): - """Represents the health state of a service, which contains the service - identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param service_name: Name of the service whose health state is represented - by this object. + """Represents the health state of a service, which contains the service identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_name: Name of the service whose health state is represented by this object. 
:type service_name: str """ @@ -18737,28 +22835,30 @@ class ServiceHealthState(EntityHealthState): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, service_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + service_name: Optional[str] = None, + **kwargs + ): super(ServiceHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.service_name = service_name class ServiceHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a service, which contains the service - name, its aggregated health state and any partitions that respect the - filters in the cluster health chunk query description. + """Represents the health state chunk of a service, which contains the service name, its aggregated health state and any partitions that respect the filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_name: The name of the service whose health state chunk is - provided in this object. + :param service_name: The name of the service whose health state chunk is provided in this + object. :type service_name: str - :param partition_health_state_chunks: The list of partition health state - chunks belonging to the service that respect the filters in the cluster - health chunk query description. 
- :type partition_health_state_chunks: - ~azure.servicefabric.models.PartitionHealthStateChunkList + :param partition_health_state_chunks: The list of partition health state chunks belonging to + the service that respect the filters in the cluster health chunk query description. + :type partition_health_state_chunks: ~azure.servicefabric.models.PartitionHealthStateChunkList """ _attribute_map = { @@ -18767,18 +22867,24 @@ class ServiceHealthStateChunk(EntityHealthStateChunk): 'partition_health_state_chunks': {'key': 'PartitionHealthStateChunks', 'type': 'PartitionHealthStateChunkList'}, } - def __init__(self, *, health_state=None, service_name: str=None, partition_health_state_chunks=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + service_name: Optional[str] = None, + partition_health_state_chunks: Optional["PartitionHealthStateChunkList"] = None, + **kwargs + ): super(ServiceHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.service_name = service_name self.partition_health_state_chunks = partition_health_state_chunks -class ServiceHealthStateChunkList(Model): - """The list of service health state chunks that respect the input filters in - the chunk query. Returned by get cluster health state chunks query. +class ServiceHealthStateChunkList(msrest.serialization.Model): + """The list of service health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - :param items: The list of service health state chunks that respect the - input filters in the chunk query. + :param items: The list of service health state chunks that respect the input filters in the + chunk query. 
:type items: list[~azure.servicefabric.models.ServiceHealthStateChunk] """ @@ -18786,67 +22892,60 @@ class ServiceHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[ServiceHealthStateChunk]'}, } - def __init__(self, *, items=None, **kwargs) -> None: + def __init__( + self, + *, + items: Optional[List["ServiceHealthStateChunk"]] = None, + **kwargs + ): super(ServiceHealthStateChunkList, self).__init__(**kwargs) self.items = items -class ServiceHealthStateFilter(Model): - """Defines matching criteria to determine whether a service should be included - as a child of an application in the cluster health chunk. - The services are only returned if the parent application matches a filter - specified in the cluster health chunk query description. - One filter can match zero, one or multiple services, depending on its - properties. - - :param service_name_filter: The name of the service that matches the - filter. The filter is applied only to the specified service, if it exists. - If the service doesn't exist, no service is returned in the cluster health - chunk based on this filter. - If the service exists, it is included as the application's child if the - health state matches the other filter properties. - If not specified, all services that match the parent filters (if any) are - taken into consideration and matched against the other filter members, - like health state filter. +class ServiceHealthStateFilter(msrest.serialization.Model): + """Defines matching criteria to determine whether a service should be included as a child of an application in the cluster health chunk. +The services are only returned if the parent application matches a filter specified in the cluster health chunk query description. +One filter can match zero, one or multiple services, depending on its properties. + + :param service_name_filter: The name of the service that matches the filter. The filter is + applied only to the specified service, if it exists. 
+ If the service doesn't exist, no service is returned in the cluster health chunk based on this + filter. + If the service exists, it is included as the application's child if the health state matches + the other filter properties. + If not specified, all services that match the parent filters (if any) are taken into + consideration and matched against the other filter members, like health state filter. :type service_name_filter: str - :param health_state_filter: The filter for the health state of the - services. It allows selecting services if they match the desired health - states. - The possible values are integer value of one of the following health - states. Only services that match the filter are returned. All services are - used to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the service name is - specified. If the filter has default value and service name is specified, - the matching service is returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches services with - HealthState value of OK (2) and Warning (4). - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in order to - return no results on a given collection of states. The value is 1. - - Ok - Filter that matches input with HealthState value Ok. The value is - 2. - - Warning - Filter that matches input with HealthState value Warning. The - value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The value is - 65535. Default value: 0 . + :param health_state_filter: The filter for the health state of the services. It allows + selecting services if they match the desired health states. 
+ The possible values are integer value of one of the following health states. Only services + that match the filter are returned. All services are used to evaluate the cluster aggregated + health state. + If not specified, default value is None, unless the service name is specified. If the filter + has default value and service name is specified, the matching service is returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches services with HealthState value of OK (2) + and Warning (4). + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. :type health_state_filter: int - :param partition_filters: Defines a list of filters that specify which - partitions to be included in the returned cluster health chunk as children - of the service. The partitions are returned only if the parent service - matches a filter. - If the list is empty, no partitions are returned. All the partitions are - used to evaluate the parent service aggregated health state, regardless of - the input filters. + :param partition_filters: Defines a list of filters that specify which partitions to be + included in the returned cluster health chunk as children of the service. The partitions are + returned only if the parent service matches a filter. + If the list is empty, no partitions are returned. 
All the partitions are used to evaluate the + parent service aggregated health state, regardless of the input filters. The service filter may specify multiple partition filters. - For example, it can specify a filter to return all partitions with health - state Error and another filter to always include a partition identified by - its partition ID. - :type partition_filters: - list[~azure.servicefabric.models.PartitionHealthStateFilter] + For example, it can specify a filter to return all partitions with health state Error and + another filter to always include a partition identified by its partition ID. + :type partition_filters: list[~azure.servicefabric.models.PartitionHealthStateFilter] """ _attribute_map = { @@ -18855,14 +22954,21 @@ class ServiceHealthStateFilter(Model): 'partition_filters': {'key': 'PartitionFilters', 'type': '[PartitionHealthStateFilter]'}, } - def __init__(self, *, service_name_filter: str=None, health_state_filter: int=0, partition_filters=None, **kwargs) -> None: + def __init__( + self, + *, + service_name_filter: Optional[str] = None, + health_state_filter: Optional[int] = 0, + partition_filters: Optional[List["PartitionHealthStateFilter"]] = None, + **kwargs + ): super(ServiceHealthStateFilter, self).__init__(**kwargs) self.service_name_filter = service_name_filter self.health_state_filter = health_state_filter self.partition_filters = partition_filters -class ServiceIdentity(Model): +class ServiceIdentity(msrest.serialization.Model): """Map service identity friendly name to an application identity. :param name: The identity friendly name. 
@@ -18876,47 +22982,51 @@ class ServiceIdentity(Model): 'identity_ref': {'key': 'identityRef', 'type': 'str'}, } - def __init__(self, *, name: str=None, identity_ref: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + identity_ref: Optional[str] = None, + **kwargs + ): super(ServiceIdentity, self).__init__(**kwargs) self.name = name self.identity_ref = identity_ref -class ServiceInfo(Model): +class ServiceInfo(msrest.serialization.Model): """Information about a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceInfo, StatelessServiceInfo + sub-classes are: StatefulServiceInfo, StatelessServiceInfo. All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str - :param type_name: Name of the service type as specified in the service - manifest. + :param type_name: Name of the service type as specified in the service manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values - include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', - 'Failed' + :param service_status: The status of the application. Possible values include: "Unknown", + "Active", "Upgrading", "Deleting", "Creating", "Failed". :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str """ _validation = { @@ -18925,55 +23035,62 @@ class ServiceInfo(Model): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceInfo', 'Stateless': 'StatelessServiceInfo'} } - def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + type_name: Optional[str] = None, + manifest_version: Optional[str] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + service_status: Optional[Union[str, "ServiceStatus"]] = None, + is_service_group: Optional[bool] = None, + **kwargs + ): super(ServiceInfo, self).__init__(**kwargs) self.id = id + self.service_kind = None # type: Optional[str] self.name = name self.type_name = type_name self.manifest_version = manifest_version self.health_state = health_state self.service_status = service_status self.is_service_group = is_service_group - self.service_kind = None -class ServiceLoadMetricDescription(Model): +class ServiceLoadMetricDescription(msrest.serialization.Model): """Specifies a metric to load balance a service during runtime. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the metric. If the service chooses to - report load during runtime, the load metric name should match the name - that is specified in Name exactly. 
Note that metric names are - case-sensitive. + :param name: Required. The name of the metric. If the service chooses to report load during + runtime, the load metric name should match the name that is specified in Name exactly. Note + that metric names are case-sensitive. :type name: str - :param weight: The service load metric relative weight, compared to other - metrics configured for this service, as a number. Possible values include: - 'Zero', 'Low', 'Medium', 'High' + :param weight: The service load metric relative weight, compared to other metrics configured + for this service, as a number. Possible values include: "Zero", "Low", "Medium", "High". :type weight: str or ~azure.servicefabric.models.ServiceLoadMetricWeight - :param primary_default_load: Used only for Stateful services. The default - amount of load, as a number, that this service creates for this metric - when it is a Primary replica. + :param primary_default_load: Used only for Stateful services. The default amount of load, as a + number, that this service creates for this metric when it is a Primary replica. :type primary_default_load: int - :param secondary_default_load: Used only for Stateful services. The - default amount of load, as a number, that this service creates for this - metric when it is a Secondary replica. + :param secondary_default_load: Used only for Stateful services. The default amount of load, as + a number, that this service creates for this metric when it is a Secondary replica. :type secondary_default_load: int - :param default_load: Used only for Stateless services. The default amount - of load, as a number, that this service creates for this metric. + :param default_load: Used only for Stateless services. The default amount of load, as a number, + that this service creates for this metric. 
:type default_load: int """ @@ -18989,7 +23106,16 @@ class ServiceLoadMetricDescription(Model): 'default_load': {'key': 'DefaultLoad', 'type': 'int'}, } - def __init__(self, *, name: str, weight=None, primary_default_load: int=None, secondary_default_load: int=None, default_load: int=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + weight: Optional[Union[str, "ServiceLoadMetricWeight"]] = None, + primary_default_load: Optional[int] = None, + secondary_default_load: Optional[int] = None, + default_load: Optional[int] = None, + **kwargs + ): super(ServiceLoadMetricDescription, self).__init__(**kwargs) self.name = name self.weight = weight @@ -18998,16 +23124,15 @@ def __init__(self, *, name: str, weight=None, primary_default_load: int=None, se self.default_load = default_load -class ServiceNameInfo(Model): +class ServiceNameInfo(msrest.serialization.Model): """Information about the service name. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str @@ -19018,7 +23143,13 @@ class ServiceNameInfo(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + **kwargs + ): super(ServiceNameInfo, self).__init__(**kwargs) self.id = id self.name = name @@ -19029,25 +23160,44 @@ class ServiceNewHealthReportEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + 
"ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param service_id: Required. The identity of the service. This ID is an - encoded representation of the service name. This is used in the REST APIs - to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param service_id: Required. The identity of the service. This ID is an encoded representation + of the service name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. 
:type service_id: str :param instance_id: Required. Id of Service instance. :type instance_id: long @@ -19063,17 +23213,16 @@ class ServiceNewHealthReportEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -19087,11 +23236,11 @@ class ServiceNewHealthReportEvent(ServiceEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -19104,8 +23253,27 @@ class ServiceNewHealthReportEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: 
datetime.datetime, + service_id: str, + instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(ServiceNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.kind = 'ServiceNewHealthReport' # type: str self.instance_id = instance_id self.source_id = source_id self.property = property @@ -19115,33 +23283,29 @@ def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, insta self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'ServiceNewHealthReport' -class ServicePartitionInfo(Model): +class ServicePartitionInfo(msrest.serialization.Model): """Information about a partition of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServicePartitionInfo, - StatelessServicePartitionInfo + sub-classes are: StatefulServicePartitionInfo, StatelessServicePartitionInfo. All required parameters must be populated in order to send to Azure. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". 
+ :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service - partition. Possible values include: 'Invalid', 'Ready', 'NotReady', - 'InQuorumLoss', 'Reconfiguring', 'Deleting' - :type partition_status: str or - ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, - partitioning scheme and keys supported by it. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :param partition_status: The status of the service fabric service partition. Possible values + include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". + :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, partitioning scheme and + keys supported by it. 
+ :type partition_information: ~azure.servicefabric.models.PartitionInformation """ _validation = { @@ -19149,38 +23313,44 @@ class ServicePartitionInfo(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServicePartitionInfo', 'Stateless': 'StatelessServicePartitionInfo'} } - def __init__(self, *, health_state=None, partition_status=None, partition_information=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + partition_status: Optional[Union[str, "ServicePartitionStatus"]] = None, + partition_information: Optional["PartitionInformation"] = None, + **kwargs + ): super(ServicePartitionInfo, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.health_state = health_state self.partition_status = partition_status self.partition_information = partition_information - self.service_kind = None -class ServicePlacementPolicyDescription(Model): +class ServicePlacementPolicyDescription(msrest.serialization.Model): """Describes the policy to be used for placement of a Service Fabric service. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ServicePlacementInvalidDomainPolicyDescription, - ServicePlacementNonPartiallyPlaceServicePolicyDescription, - ServicePlacementPreferPrimaryDomainPolicyDescription, - ServicePlacementRequiredDomainPolicyDescription, - ServicePlacementRequireDomainDistributionPolicyDescription + sub-classes are: ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, ServicePlacementInvalidDomainPolicyDescription, ServicePlacementNonPartiallyPlaceServicePolicyDescription, ServicePlacementPreferPrimaryDomainPolicyDescription, ServicePlacementRequiredDomainPolicyDescription, ServicePlacementRequireDomainDistributionPolicyDescription. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". 
+ :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType """ _validation = { @@ -19192,25 +23362,63 @@ class ServicePlacementPolicyDescription(Model): } _subtype_map = { - 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} + 'type': {'AllowMultipleStatelessInstancesOnNode': 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ServicePlacementPolicyDescription, self).__init__(**kwargs) - self.type = None + self.type = None # type: Optional[str] + + +class ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service allowing multiple stateless instances of a partition of the service to be placed on a node. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. 
Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: Holdover from other policy descriptions, not used for this policy, values + are ignored by runtime. Keeping it for any backwards-compatibility with clients. + :type domain_name: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + } + + def __init__( + self, + *, + domain_name: Optional[str] = None, + **kwargs + ): + super(ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, self).__init__(**kwargs) + self.type = 'AllowMultipleStatelessInstancesOnNode' # type: str + self.domain_name = domain_name class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where a particular fault or upgrade domain should not be used for placement - of the instances or replicas of that service. + """Describes the policy to be used for placement of a Service Fabric service where a particular fault or upgrade domain should not be used for placement of the instances or replicas of that service. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should not be used for - placement. + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. 
Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should not be used for placement. :type domain_name: str """ @@ -19223,21 +23431,27 @@ class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescr 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, *, domain_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + **kwargs + ): super(ServicePlacementInvalidDomainPolicyDescription, self).__init__(**kwargs) + self.type = 'InvalidDomain' # type: str self.domain_name = domain_name - self.type = 'InvalidDomain' class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where all replicas must be able to be placed in order for any replicas to - be created. + """Describes the policy to be used for placement of a Service Fabric service where all replicas must be able to be placed in order for any replicas to be created. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". 
+ :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType """ _validation = { @@ -19248,29 +23462,27 @@ class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacement 'type': {'key': 'Type', 'type': 'str'}, } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__(**kwargs) - self.type = 'NonPartiallyPlaceService' + self.type = 'NonPartiallyPlaceService' # type: str class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where the service's Primary replicas should optimally be placed in a - particular domain. - This placement policy is usually used with fault domains in scenarios where - the Service Fabric cluster is geographically distributed in order to - indicate that a service's primary replica should be located in a particular - fault domain, which in geo-distributed scenarios usually aligns with - regional or datacenter boundaries. Note that since this is an optimization - it is possible that the Primary replica may not end up located in this - domain due to failures, capacity limits, or other constraints. + """Describes the policy to be used for placement of a Service Fabric service where the service's Primary replicas should optimally be placed in a particular domain. + +This placement policy is usually used with fault domains in scenarios where the Service Fabric cluster is geographically distributed in order to indicate that a service's primary replica should be located in a particular fault domain, which in geo-distributed scenarios usually aligns with regional or datacenter boundaries. Note that since this is an optimization it is possible that the Primary replica may not end up located in this domain due to failures, capacity limits, or other constraints. 
All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should used for placement - as per this policy. + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should used for placement as per this policy. :type domain_name: str """ @@ -19283,23 +23495,28 @@ class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolic 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, *, domain_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + **kwargs + ): super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__(**kwargs) + self.type = 'PreferPrimaryDomain' # type: str self.domain_name = domain_name - self.type = 'PreferPrimaryDomain' class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where the instances or replicas of that service must be placed in a - particular domain. + """Describes the policy to be used for placement of a Service Fabric service where the instances or replicas of that service must be placed in a particular domain. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should used for placement - as per this policy. + :param type: Required. 
The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should used for placement as per this policy. :type domain_name: str """ @@ -19312,31 +23529,30 @@ class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDesc 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, *, domain_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + **kwargs + ): super(ServicePlacementRequiredDomainPolicyDescription, self).__init__(**kwargs) + self.type = 'RequireDomain' # type: str self.domain_name = domain_name - self.type = 'RequireDomain' class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service - where two replicas from the same partition should never be placed in the - same fault or upgrade domain. - While this is not common it can expose the service to an increased risk of - concurrent failures due to unplanned outages or other cases of - subsequent/concurrent failures. As an example, consider a case where - replicas are deployed across different data center, with one replica per - location. In the event that one of the datacenters goes offline, normally - the replica that was placed in that datacenter will be packed into one of - the remaining datacenters. If this is not desirable then this policy should - be set. 
+ """Describes the policy to be used for placement of a Service Fabric service where two replicas from the same partition should never be placed in the same fault or upgrade domain. + +While this is not common it can expose the service to an increased risk of concurrent failures due to unplanned outages or other cases of subsequent/concurrent failures. As an example, consider a case where replicas are deployed across different data center, with one replica per location. In the event that one of the datacenters goes offline, normally the replica that was placed in that datacenter will be packed into one of the remaining datacenters. If this is not desirable then this policy should be set. All required parameters must be populated in order to send to Azure. - :param type: Required. Constant filled by server. - :type type: str - :param domain_name: The name of the domain that should used for placement - as per this policy. + :param type: Required. The type of placement policy for a service fabric service. Following are + the possible values.Constant filled by server. Possible values include: "Invalid", + "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", + "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". + :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param domain_name: The name of the domain that should used for placement as per this policy. 
:type domain_name: str """ @@ -19349,40 +23565,42 @@ class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacemen 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, *, domain_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + domain_name: Optional[str] = None, + **kwargs + ): super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__(**kwargs) + self.type = 'RequireDomainDistribution' # type: str self.domain_name = domain_name - self.type = 'RequireDomainDistribution' -class ServiceProperties(Model): +class ServiceProperties(msrest.serialization.Model): """Describes properties of a service resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. - Defaults to 1 if not specified. + :param replica_count: The number of replicas of the service to create. Defaults to 1 if not + specified. :type replica_count: int - :param execution_policy: The execution policy of the service + :param execution_policy: The execution policy of the service. :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies - :type auto_scaling_policies: - list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :param auto_scaling_policies: Auto scaling policies. + :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". 
:vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the service. + :ivar status_details: Gives additional information about the current status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', - this additional details from service fabric Health Manager for the user to - know why the service is marked unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the service is marked + unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. 
:type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -19410,7 +23628,17 @@ class ServiceProperties(Model): 'dns_name': {'key': 'dnsName', 'type': 'str'}, } - def __init__(self, *, description: str=None, replica_count: int=None, execution_policy=None, auto_scaling_policies=None, identity_refs=None, dns_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + description: Optional[str] = None, + replica_count: Optional[int] = None, + execution_policy: Optional["ExecutionPolicy"] = None, + auto_scaling_policies: Optional[List["AutoScalingPolicy"]] = None, + identity_refs: Optional[List["ServiceIdentity"]] = None, + dns_name: Optional[str] = None, + **kwargs + ): super(ServiceProperties, self).__init__(**kwargs) self.description = description self.replica_count = replica_count @@ -19424,22 +23652,19 @@ def __init__(self, *, description: str=None, replica_count: int=None, execution_ self.dns_name = dns_name -class ServiceReplicaProperties(Model): +class ServiceReplicaProperties(msrest.serialization.Model): """Describes the properties of a service replica. All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in - service. Possible values include: 'Linux', 'Windows' + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that - forms the service. A code package describes the container and the - properties for running it. All the code packages are started together on - the same host and share the same context (network, process etc.). - :type code_packages: - list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service - needs to be part of. + :param code_packages: Required. 
Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -19457,7 +23682,15 @@ class ServiceReplicaProperties(Model): 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, } - def __init__(self, *, os_type, code_packages, network_refs=None, diagnostics=None, **kwargs) -> None: + def __init__( + self, + *, + os_type: Union[str, "OperatingSystemType"], + code_packages: List["ContainerCodePackageProperties"], + network_refs: Optional[List["NetworkRef"]] = None, + diagnostics: Optional["DiagnosticsRef"] = None, + **kwargs + ): super(ServiceReplicaProperties, self).__init__(**kwargs) self.os_type = os_type self.code_packages = code_packages @@ -19470,17 +23703,14 @@ class ServiceReplicaDescription(ServiceReplicaProperties): All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in - service. Possible values include: 'Linux', 'Windows' + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that - forms the service. A code package describes the container and the - properties for running it. All the code packages are started together on - the same host and share the same context (network, process etc.). 
- :type code_packages: - list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service - needs to be part of. + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -19502,57 +23732,60 @@ class ServiceReplicaDescription(ServiceReplicaProperties): 'replica_name': {'key': 'replicaName', 'type': 'str'}, } - def __init__(self, *, os_type, code_packages, replica_name: str, network_refs=None, diagnostics=None, **kwargs) -> None: + def __init__( + self, + *, + os_type: Union[str, "OperatingSystemType"], + code_packages: List["ContainerCodePackageProperties"], + replica_name: str, + network_refs: Optional[List["NetworkRef"]] = None, + diagnostics: Optional["DiagnosticsRef"] = None, + **kwargs + ): super(ServiceReplicaDescription, self).__init__(os_type=os_type, code_packages=code_packages, network_refs=network_refs, diagnostics=diagnostics, **kwargs) self.replica_name = replica_name -class ServiceResourceDescription(Model): +class ServiceResourceDescription(msrest.serialization.Model): """This type describes a service resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. 
Name of the Service resource. :type name: str - :param os_type: Required. The operation system required by the code in - service. Possible values include: 'Linux', 'Windows' + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that - forms the service. A code package describes the container and the - properties for running it. All the code packages are started together on - the same host and share the same context (network, process etc.). - :type code_packages: - list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service - needs to be part of. + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. - Defaults to 1 if not specified. + :param replica_count: The number of replicas of the service to create. Defaults to 1 if not + specified. :type replica_count: int - :param execution_policy: The execution policy of the service + :param execution_policy: The execution policy of the service. 
:type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies - :type auto_scaling_policies: - list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :param auto_scaling_policies: Auto scaling policies. + :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the service. + :ivar status_details: Gives additional information about the current status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. - Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', - this additional details from service fabric Health Manager for the user to - know why the service is marked unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the service is marked + unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. 
:type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -19588,7 +23821,22 @@ class ServiceResourceDescription(Model): 'dns_name': {'key': 'properties.dnsName', 'type': 'str'}, } - def __init__(self, *, name: str, os_type, code_packages, network_refs=None, diagnostics=None, description: str=None, replica_count: int=None, execution_policy=None, auto_scaling_policies=None, identity_refs=None, dns_name: str=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + os_type: Union[str, "OperatingSystemType"], + code_packages: List["ContainerCodePackageProperties"], + network_refs: Optional[List["NetworkRef"]] = None, + diagnostics: Optional["DiagnosticsRef"] = None, + description: Optional[str] = None, + replica_count: Optional[int] = None, + execution_policy: Optional["ExecutionPolicy"] = None, + auto_scaling_policies: Optional[List["AutoScalingPolicy"]] = None, + identity_refs: Optional[List["ServiceIdentity"]] = None, + dns_name: Optional[str] = None, + **kwargs + ): super(ServiceResourceDescription, self).__init__(**kwargs) self.name = name self.os_type = os_type @@ -19607,39 +23855,142 @@ def __init__(self, *, name: str, os_type, code_packages, network_refs=None, diag self.dns_name = dns_name +class ServiceResourceProperties(ServiceReplicaProperties, ServiceProperties): + """This type describes properties of a service resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :param description: User readable description of the service. + :type description: str + :param replica_count: The number of replicas of the service to create. Defaults to 1 if not + specified. + :type replica_count: int + :param execution_policy: The execution policy of the service. + :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy + :param auto_scaling_policies: Auto scaling policies. 
+ :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". + :vartype status: str or ~azure.servicefabric.models.ResourceStatus + :ivar status_details: Gives additional information about the current status of the service. + :vartype status_details: str + :ivar health_state: Describes the health state of an application resource. Possible values + include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :vartype health_state: str or ~azure.servicefabric.models.HealthState + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional + details from service fabric Health Manager for the user to know why the service is marked + unhealthy. + :vartype unhealthy_evaluation: str + :param identity_refs: The service identity list. + :type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] + :param dns_name: Dns name of the service. + :type dns_name: str + :param os_type: Required. The operation system required by the code in service. Possible values + include: "Linux", "Windows". + :type os_type: str or ~azure.servicefabric.models.OperatingSystemType + :param code_packages: Required. Describes the set of code packages that forms the service. A + code package describes the container and the properties for running it. All the code packages + are started together on the same host and share the same context (network, process etc.). + :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service needs to be part of. + :type network_refs: list[~azure.servicefabric.models.NetworkRef] + :param diagnostics: Reference to sinks in DiagnosticsDescription. 
+ :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef + """ + + _validation = { + 'status': {'readonly': True}, + 'status_details': {'readonly': True}, + 'health_state': {'readonly': True}, + 'unhealthy_evaluation': {'readonly': True}, + 'os_type': {'required': True}, + 'code_packages': {'required': True}, + } + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'replica_count': {'key': 'replicaCount', 'type': 'int'}, + 'execution_policy': {'key': 'executionPolicy', 'type': 'ExecutionPolicy'}, + 'auto_scaling_policies': {'key': 'autoScalingPolicies', 'type': '[AutoScalingPolicy]'}, + 'status': {'key': 'status', 'type': 'str'}, + 'status_details': {'key': 'statusDetails', 'type': 'str'}, + 'health_state': {'key': 'healthState', 'type': 'str'}, + 'unhealthy_evaluation': {'key': 'unhealthyEvaluation', 'type': 'str'}, + 'identity_refs': {'key': 'identityRefs', 'type': '[ServiceIdentity]'}, + 'dns_name': {'key': 'dnsName', 'type': 'str'}, + 'os_type': {'key': 'osType', 'type': 'str'}, + 'code_packages': {'key': 'codePackages', 'type': '[ContainerCodePackageProperties]'}, + 'network_refs': {'key': 'networkRefs', 'type': '[NetworkRef]'}, + 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, + } + + def __init__( + self, + *, + os_type: Union[str, "OperatingSystemType"], + code_packages: List["ContainerCodePackageProperties"], + description: Optional[str] = None, + replica_count: Optional[int] = None, + execution_policy: Optional["ExecutionPolicy"] = None, + auto_scaling_policies: Optional[List["AutoScalingPolicy"]] = None, + identity_refs: Optional[List["ServiceIdentity"]] = None, + dns_name: Optional[str] = None, + network_refs: Optional[List["NetworkRef"]] = None, + diagnostics: Optional["DiagnosticsRef"] = None, + **kwargs + ): + super(ServiceResourceProperties, self).__init__(os_type=os_type, code_packages=code_packages, network_refs=network_refs, diagnostics=diagnostics, description=description, 
replica_count=replica_count, execution_policy=execution_policy, auto_scaling_policies=auto_scaling_policies, identity_refs=identity_refs, dns_name=dns_name, **kwargs) + self.description = description + self.replica_count = replica_count + self.execution_policy = execution_policy + self.auto_scaling_policies = auto_scaling_policies + self.status = None + self.status_details = None + self.health_state = None + self.unhealthy_evaluation = None + self.identity_refs = identity_refs + self.dns_name = dns_name + self.os_type = os_type + self.code_packages = code_packages + self.network_refs = network_refs + self.diagnostics = diagnostics + + class ServicesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for services of a certain service type - belonging to an application, containing health evaluations for each - unhealthy service that impacted current aggregated health state. Can be - returned when evaluating application health and the aggregated health state - is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for services of a certain service type belonging to an application, containing health evaluations for each unhealthy service that impacted current aggregated health state. Can be returned when evaluating application health and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. 
The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str :param service_type_name: Name of the service type of the services. :type service_type_name: str - :param max_percent_unhealthy_services: Maximum allowed percentage of - unhealthy services from the ServiceTypeHealthPolicy. + :param max_percent_unhealthy_services: Maximum allowed percentage of unhealthy services from + the ServiceTypeHealthPolicy. :type max_percent_unhealthy_services: int - :param total_count: Total number of services of the current service type - in the application from the health store. + :param total_count: Total number of services of the current service type in the application + from the health store. 
:type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - ServiceHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy ServiceHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -19647,57 +23998,62 @@ class ServicesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, service_type_name: str=None, max_percent_unhealthy_services: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + service_type_name: Optional[str] = None, + max_percent_unhealthy_services: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(ServicesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'Services' # type: str self.service_type_name 
= service_type_name self.max_percent_unhealthy_services = max_percent_unhealthy_services self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'Services' -class ServiceTypeDescription(Model): - """Describes a service type defined in the service manifest of a provisioned - application type. The properties the ones defined in the service manifest. +class ServiceTypeDescription(msrest.serialization.Model): + """Describes a service type defined in the service manifest of a provisioned application type. The properties the ones defined in the service manifest. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceTypeDescription, - StatelessServiceTypeDescription + sub-classes are: StatefulServiceTypeDescription, StatelessServiceTypeDescription. All required parameters must be populated in order to send to Azure. - :param is_stateful: Indicates whether the service type is a stateful - service type or a stateless service type. This property is true if the - service type is a stateful service type, false otherwise. + :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. + Possible values include: "Invalid", "Stateless", "Stateful". + :type kind: str or ~azure.servicefabric.models.ServiceKind + :param is_stateful: Indicates whether the service type is a stateful service type or a + stateless service type. This property is true if the service type is a stateful service type, + false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when - instantiating this service in a Service Fabric cluster. 
+ :param placement_constraints: The placement constraint to be used when instantiating this + service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy - descriptions. + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: - list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Required. Constant filled by server. - :type kind: str + :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] """ _validation = { @@ -19705,31 +24061,41 @@ class ServiceTypeDescription(Model): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, - 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'Stateful': 'StatefulServiceTypeDescription', 'Stateless': 'StatelessServiceTypeDescription'} } - def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, **kwargs) -> None: + def 
__init__( + self, + *, + is_stateful: Optional[bool] = None, + service_type_name: Optional[str] = None, + placement_constraints: Optional[str] = None, + load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + extensions: Optional[List["ServiceTypeExtensionDescription"]] = None, + **kwargs + ): super(ServiceTypeDescription, self).__init__(**kwargs) + self.kind = None # type: Optional[str] self.is_stateful = is_stateful self.service_type_name = service_type_name self.placement_constraints = placement_constraints self.load_metrics = load_metrics self.service_placement_policies = service_placement_policies self.extensions = extensions - self.kind = None -class ServiceTypeExtensionDescription(Model): +class ServiceTypeExtensionDescription(msrest.serialization.Model): """Describes extension of a service type defined in the service manifest. :param key: The name of the extension. @@ -19743,51 +24109,56 @@ class ServiceTypeExtensionDescription(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: + def __init__( + self, + *, + key: Optional[str] = None, + value: Optional[str] = None, + **kwargs + ): super(ServiceTypeExtensionDescription, self).__init__(**kwargs) self.key = key self.value = value -class ServiceTypeHealthPolicy(Model): - """Represents the health policy used to evaluate the health of services - belonging to a service type. - - :param max_percent_unhealthy_partitions_per_service: The maximum allowed - percentage of unhealthy partitions per service. Allowed values are Byte - values from zero to 100 - The percentage represents the maximum tolerated percentage of partitions - that can be unhealthy before the service is considered in error. - If the percentage is respected but there is at least one unhealthy - partition, the health is evaluated as Warning. 
- The percentage is calculated by dividing the number of unhealthy - partitions over the total number of partitions in the service. - The computation rounds up to tolerate one failure on small numbers of - partitions. Default percentage is zero. Default value: 0 . +class ServiceTypeHealthPolicy(msrest.serialization.Model): + """Represents the health policy used to evaluate the health of services belonging to a service type. + + :param max_percent_unhealthy_partitions_per_service: The maximum allowed percentage of + unhealthy partitions per service. Allowed values are Byte values from zero to 100 + + The percentage represents the maximum tolerated percentage of partitions that can be unhealthy + before the service is considered in error. + If the percentage is respected but there is at least one unhealthy partition, the health is + evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy partitions over the total + number of partitions in the service. + The computation rounds up to tolerate one failure on small numbers of partitions. Default + percentage is zero. :type max_percent_unhealthy_partitions_per_service: int - :param max_percent_unhealthy_replicas_per_partition: The maximum allowed - percentage of unhealthy replicas per partition. Allowed values are Byte - values from zero to 100. - The percentage represents the maximum tolerated percentage of replicas - that can be unhealthy before the partition is considered in error. - If the percentage is respected but there is at least one unhealthy - replica, the health is evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy replicas - over the total number of replicas in the partition. - The computation rounds up to tolerate one failure on small numbers of - replicas. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_replicas_per_partition: The maximum allowed percentage of + unhealthy replicas per partition. 
Allowed values are Byte values from zero to 100. + + The percentage represents the maximum tolerated percentage of replicas that can be unhealthy + before the partition is considered in error. + If the percentage is respected but there is at least one unhealthy replica, the health is + evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy replicas over the total + number of replicas in the partition. + The computation rounds up to tolerate one failure on small numbers of replicas. Default + percentage is zero. :type max_percent_unhealthy_replicas_per_partition: int - :param max_percent_unhealthy_services: The maximum allowed percentage of - unhealthy services. Allowed values are Byte values from zero to 100. - The percentage represents the maximum tolerated percentage of services - that can be unhealthy before the application is considered in error. - If the percentage is respected but there is at least one unhealthy - service, the health is evaluated as Warning. - This is calculated by dividing the number of unhealthy services of the - specific service type over the total number of services of the specific - service type. - The computation rounds up to tolerate one failure on small numbers of - services. Default percentage is zero. Default value: 0 . + :param max_percent_unhealthy_services: The maximum allowed percentage of unhealthy services. + Allowed values are Byte values from zero to 100. + + The percentage represents the maximum tolerated percentage of services that can be unhealthy + before the application is considered in error. + If the percentage is respected but there is at least one unhealthy service, the health is + evaluated as Warning. + This is calculated by dividing the number of unhealthy services of the specific service type + over the total number of services of the specific service type. + The computation rounds up to tolerate one failure on small numbers of services. Default + percentage is zero. 
:type max_percent_unhealthy_services: int """ @@ -19797,23 +24168,30 @@ class ServiceTypeHealthPolicy(Model): 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, } - def __init__(self, *, max_percent_unhealthy_partitions_per_service: int=0, max_percent_unhealthy_replicas_per_partition: int=0, max_percent_unhealthy_services: int=0, **kwargs) -> None: + def __init__( + self, + *, + max_percent_unhealthy_partitions_per_service: Optional[int] = 0, + max_percent_unhealthy_replicas_per_partition: Optional[int] = 0, + max_percent_unhealthy_services: Optional[int] = 0, + **kwargs + ): super(ServiceTypeHealthPolicy, self).__init__(**kwargs) self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition self.max_percent_unhealthy_services = max_percent_unhealthy_services -class ServiceTypeHealthPolicyMapItem(Model): +class ServiceTypeHealthPolicyMapItem(msrest.serialization.Model): """Defines an item in ServiceTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the service type health policy map item. - This is the name of the service type. + :param key: Required. The key of the service type health policy map item. This is the name of + the service type. :type key: str - :param value: Required. The value of the service type health policy map - item. This is the ServiceTypeHealthPolicy for this service type. + :param value: Required. The value of the service type health policy map item. This is the + ServiceTypeHealthPolicy for this service type. 
:type value: ~azure.servicefabric.models.ServiceTypeHealthPolicy """ @@ -19827,29 +24205,32 @@ class ServiceTypeHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'ServiceTypeHealthPolicy'}, } - def __init__(self, *, key: str, value, **kwargs) -> None: + def __init__( + self, + *, + key: str, + value: "ServiceTypeHealthPolicy", + **kwargs + ): super(ServiceTypeHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value -class ServiceTypeInfo(Model): - """Information about a service type that is defined in a service manifest of a - provisioned application type. +class ServiceTypeInfo(msrest.serialization.Model): + """Information about a service type that is defined in a service manifest of a provisioned application type. - :param service_type_description: Describes a service type defined in the - service manifest of a provisioned application type. The properties the - ones defined in the service manifest. - :type service_type_description: - ~azure.servicefabric.models.ServiceTypeDescription - :param service_manifest_name: The name of the service manifest in which - this service type is defined. + :param service_type_description: Describes a service type defined in the service manifest of a + provisioned application type. The properties the ones defined in the service manifest. + :type service_type_description: ~azure.servicefabric.models.ServiceTypeDescription + :param service_manifest_name: The name of the service manifest in which this service type is + defined. :type service_manifest_name: str - :param service_manifest_version: The version of the service manifest in - which this service type is defined. + :param service_manifest_version: The version of the service manifest in which this service type + is defined. :type service_manifest_version: str - :param is_service_group: Indicates whether the service is a service group. - If it is, the property value is true otherwise false. 
+ :param is_service_group: Indicates whether the service is a service group. If it is, the + property value is true otherwise false. :type is_service_group: bool """ @@ -19860,7 +24241,15 @@ class ServiceTypeInfo(Model): 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, } - def __init__(self, *, service_type_description=None, service_manifest_name: str=None, service_manifest_version: str=None, is_service_group: bool=None, **kwargs) -> None: + def __init__( + self, + *, + service_type_description: Optional["ServiceTypeDescription"] = None, + service_manifest_name: Optional[str] = None, + service_manifest_version: Optional[str] = None, + is_service_group: Optional[bool] = None, + **kwargs + ): super(ServiceTypeInfo, self).__init__(**kwargs) self.service_type_description = service_type_description self.service_manifest_name = service_manifest_name @@ -19868,9 +24257,8 @@ def __init__(self, *, service_type_description=None, service_manifest_name: str= self.is_service_group = is_service_group -class ServiceTypeManifest(Model): - """Contains the manifest describing a service type registered as part of an - application in a Service Fabric cluster. +class ServiceTypeManifest(msrest.serialization.Model): + """Contains the manifest describing a service type registered as part of an application in a Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -19880,87 +24268,91 @@ class ServiceTypeManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, *, manifest: str=None, **kwargs) -> None: + def __init__( + self, + *, + manifest: Optional[str] = None, + **kwargs + ): super(ServiceTypeManifest, self).__init__(**kwargs) self.manifest = manifest -class ServiceUpdateDescription(Model): - """A ServiceUpdateDescription contains all of the information necessary to - update a service. 
+class ServiceUpdateDescription(msrest.serialization.Model): + """A ServiceUpdateDescription contains all of the information necessary to update a service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceUpdateDescription, - StatelessServiceUpdateDescription - - All required parameters must be populated in order to send to Azure. - - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. - - None - Does not indicate any other properties are set. The value is - zero. - - TargetReplicaSetSize/InstanceCount - Indicates whether the - TargetReplicaSetSize property (for Stateful services) or the InstanceCount - property (for Stateless services) is set. The value is 1. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 2. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 4. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 8. - - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The - value is 16. - - PlacementConstraints - Indicates the PlacementConstraints property is - set. The value is 32. - - PlacementPolicyList - Indicates the ServicePlacementPolicies property is - set. The value is 64. - - Correlation - Indicates the CorrelationScheme property is set. The value - is 128. - - Metrics - Indicates the ServiceLoadMetrics property is set. The value is - 256. - - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The - value is 512. 
- - ScalingPolicy - Indicates the ScalingPolicies property is set. The value - is 1024. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 2048. - - MinInstanceCount - Indicates the MinInstanceCount property is set. The - value is 4096. - - MinInstancePercentage - Indicates the MinInstancePercentage property is - set. The value is 8192. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 16384. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 32768. + sub-classes are: StatefulServiceUpdateDescription, StatelessServiceUpdateDescription. + + All required parameters must be populated in order to send to Azure. + + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and + QuorumLossWaitDuration (4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. + * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property + (for Stateful services) or the InstanceCount property (for Stateless services) is set. The + value is 1. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 2. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 4. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. 
The + value is 8. + * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. + * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. + * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is + 64. + * Correlation - Indicates the CorrelationScheme property is set. The value is 128. + * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. + * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 2048. + * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. + * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is + 8192. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 16384. + * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDuration property is set. The + value is 32768. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 65536. + * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. + * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. + * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string.
Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_dns_name: The DNS name of the service. + :type service_dns_name: str + :param tags_for_placement: Tags for placement of this service. + :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription + :param tags_for_running: Tags for running of this service. 
+ :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription """ _validation = { @@ -19968,6 +24360,7 @@ class ServiceUpdateDescription(Model): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -19975,15 +24368,32 @@ class ServiceUpdateDescription(Model): 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, + 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceUpdateDescription', 'Stateless': 'StatelessServiceUpdateDescription'} } - def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, **kwargs) -> None: + def __init__( + self, + *, + flags: Optional[str] = None, + placement_constraints: Optional[str] = None, + correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, + load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + default_move_cost: Optional[Union[str, "MoveCost"]] = None, + scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, + service_dns_name: Optional[str] = None, + tags_for_placement: Optional["NodeTagsDescription"] = None, + 
tags_for_running: Optional["NodeTagsDescription"] = None, + **kwargs + ): super(ServiceUpdateDescription, self).__init__(**kwargs) + self.service_kind = None # type: Optional[str] self.flags = flags self.placement_constraints = placement_constraints self.correlation_scheme = correlation_scheme @@ -19991,20 +24401,21 @@ def __init__(self, *, flags: str=None, placement_constraints: str=None, correlat self.service_placement_policies = service_placement_policies self.default_move_cost = default_move_cost self.scaling_policies = scaling_policies - self.service_kind = None + self.service_dns_name = service_dns_name + self.tags_for_placement = tags_for_placement + self.tags_for_running = tags_for_running -class ServiceUpgradeProgress(Model): - """Information about how many replicas are completed or pending for a specific - service during upgrade. +class ServiceUpgradeProgress(msrest.serialization.Model): + """Information about how many replicas are completed or pending for a specific service during upgrade. :param service_name: Name of the Service resource. :type service_name: str - :param completed_replica_count: The number of replicas that completes the - upgrade in the service. + :param completed_replica_count: The number of replicas that completes the upgrade in the + service. :type completed_replica_count: str - :param pending_replica_count: The number of replicas that are waiting to - be upgraded in the service. + :param pending_replica_count: The number of replicas that are waiting to be upgraded in the + service. 
:type pending_replica_count: str """ @@ -20014,26 +24425,29 @@ class ServiceUpgradeProgress(Model): 'pending_replica_count': {'key': 'PendingReplicaCount', 'type': 'str'}, } - def __init__(self, *, service_name: str=None, completed_replica_count: str=None, pending_replica_count: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + completed_replica_count: Optional[str] = None, + pending_replica_count: Optional[str] = None, + **kwargs + ): super(ServiceUpgradeProgress, self).__init__(**kwargs) self.service_name = service_name self.completed_replica_count = completed_replica_count self.pending_replica_count = pending_replica_count -class Setting(Model): - """Describes a setting for the container. The setting file path can be fetched - from environment variable "Fabric_SettingPath". The path for Windows - container is "C:\\secrets". The path for Linux container is "/var/secrets". +class Setting(msrest.serialization.Model): + """Describes a setting for the container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". The path for Linux container is "/var/secrets". - :param type: The type of the setting being given in value. Possible values - include: 'ClearText', 'KeyVaultReference', 'SecretValueReference'. Default - value: "ClearText" . + :param type: The type of the setting being given in value. Possible values include: + "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". :type type: str or ~azure.servicefabric.models.SettingType :param name: The name of the setting. :type name: str - :param value: The value of the setting, will be processed based on the - type provided. + :param value: The value of the setting, will be processed based on the type provided. 
:type value: str """ @@ -20043,7 +24457,14 @@ class Setting(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, *, type="ClearText", name: str=None, value: str=None, **kwargs) -> None: + def __init__( + self, + *, + type: Optional[Union[str, "SettingType"]] = "ClearText", + name: Optional[str] = None, + value: Optional[str] = None, + **kwargs + ): super(Setting, self).__init__(**kwargs) self.type = type self.name = name @@ -20051,20 +24472,19 @@ def __init__(self, *, type="ClearText", name: str=None, value: str=None, **kwarg class SingletonPartitionInformation(PartitionInformation): - """Information about a partition that is singleton. The services with - singleton partitioning scheme are effectively non-partitioned. They only - have one partition. + """Information about a partition that is singleton. The services with singleton partitioning scheme are effectively non-partitioned. They only have one partition. All required parameters must be populated in order to send to Azure. - :param id: An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. - The partition ID is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the IDs of its - partitions would be different. + :param service_partition_kind: Required. The kind of partitioning scheme used to partition the + service.Constant filled by server. Possible values include: "Invalid", "Singleton", + "Int64Range", "Named". + :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a + randomly generated GUID when the service was created. The partition ID is unique and does not + change for the lifetime of the service. If the same service was deleted and recreated the IDs + of its partitions would be different. 
:type id: str - :param service_partition_kind: Required. Constant filled by server. - :type service_partition_kind: str """ _validation = { @@ -20072,23 +24492,28 @@ class SingletonPartitionInformation(PartitionInformation): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'str'}, } - def __init__(self, *, id: str=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + **kwargs + ): super(SingletonPartitionInformation, self).__init__(id=id, **kwargs) - self.service_partition_kind = 'Singleton' + self.service_partition_kind = 'Singleton' # type: str class SingletonPartitionSchemeDescription(PartitionSchemeDescription): - """Describes the partition scheme of a singleton-partitioned, or - non-partitioned service. + """Describes the partition scheme of a singleton-partitioned, or non-partitioned service. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". + :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme """ _validation = { @@ -20099,76 +24524,66 @@ class SingletonPartitionSchemeDescription(PartitionSchemeDescription): 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, } - def __init__(self, **kwargs) -> None: + def __init__( + self, + **kwargs + ): super(SingletonPartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'Singleton' + self.partition_scheme = 'Singleton' # type: str -class StartClusterUpgradeDescription(Model): +class StartClusterUpgradeDescription(msrest.serialization.Model): """Describes the parameters for starting a cluster upgrade. 
:param code_version: The cluster code version. :type code_version: str :param config_version: The cluster configuration version. :type config_version: str - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values + include: "Invalid", "Rolling". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and - Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The + values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: + "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: + "UnmonitoredAuto". :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of - time to block processing of an upgrade domain and prevent loss of - availability when there are unexpected issues. When this timeout expires, - processing of the upgrade domain will proceed regardless of availability - loss issues. The timeout is reset at the start of each upgrade domain. - Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit - integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when there are unexpected + issues. When this timeout expires, processing of the upgrade domain will proceed regardless of + availability loss issues. 
The timeout is reset at the start of each upgrade domain. Valid + values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted - during upgrade even when the code version has not changed (the upgrade - only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted during upgrade even when + the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through - the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', - 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default - value: "Default" . + :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible + values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", + "ReverseLexicographical". Default value: "Default". :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an - upgrade in Monitored mode. - :type monitoring_policy: - ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health - evaluation rather than absolute health evaluation after completion of each - upgrade domain. + :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. 
+ :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than + absolute health evaluation after completion of each upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to - evaluate the health of the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of + the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health - policy map used to evaluate the health of an application or one of its - children entities. - :type application_health_policy_map: - ~azure.servicefabric.models.ApplicationHealthPolicies - :param instance_close_delay_duration_in_seconds: Duration in seconds, to - wait before a stateless instance is closed, to allow the active requests - to drain gracefully. This would be effective when the instance is closing - during the application/cluster - upgrade, only for those instances which have a non-zero delay duration - configured in the service description. See - InstanceCloseDelayDurationSeconds property in $ref: + :param application_health_policy_map: Defines the application health policy map used to + evaluate the health of an application or one of its children entities. + :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies + :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a + stateless instance is closed, to allow the active requests to drain gracefully. 
This would be + effective when the instance is closing during the application/cluster + upgrade, only for those instances which have a non-zero delay duration configured in the + service description. See InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is - 4294967295, which indicates that the behavior will entirely depend on the - delay configured in the stateless service description. + Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates + that the behavior will entirely depend on the delay configured in the stateless service + description. :type instance_close_delay_duration_in_seconds: long """ @@ -20188,7 +24603,24 @@ class StartClusterUpgradeDescription(Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__(self, *, code_version: str=None, config_version: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, sort_order="Default", monitoring_policy=None, cluster_health_policy=None, enable_delta_health_evaluation: bool=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, instance_close_delay_duration_in_seconds: int=None, **kwargs) -> None: + def __init__( + self, + *, + code_version: Optional[str] = None, + config_version: Optional[str] = None, + upgrade_kind: Optional[Union[str, "UpgradeKind"]] = "Rolling", + rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", + upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, + force_restart: Optional[bool] = False, + sort_order: Optional[Union[str, "UpgradeSortOrder"]] = "Default", + monitoring_policy: Optional["MonitoringPolicyDescription"] = None, + cluster_health_policy: 
Optional["ClusterHealthPolicy"] = None, + enable_delta_health_evaluation: Optional[bool] = None, + cluster_upgrade_health_policy: Optional["ClusterUpgradeHealthPolicyObject"] = None, + application_health_policy_map: Optional["ApplicationHealthPolicies"] = None, + instance_close_delay_duration_in_seconds: Optional[int] = 4294967295, + **kwargs + ): super(StartClusterUpgradeDescription, self).__init__(**kwargs) self.code_version = code_version self.config_version = config_version @@ -20210,31 +24642,37 @@ class StartedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param chaos_parameters: Defines all the parameters to configure a Chaos - run. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param chaos_parameters: Defines all the parameters to configure a Chaos run. 
:type chaos_parameters: ~azure.servicefabric.models.ChaosParameters """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, } - def __init__(self, *, time_stamp_utc, chaos_parameters=None, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + chaos_parameters: Optional["ChaosParameters"] = None, + **kwargs + ): super(StartedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.kind = 'Started' # type: str self.chaos_parameters = chaos_parameters - self.kind = 'Started' class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -20242,31 +24680,48 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -20282,17 +24737,16 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -20307,11 +24761,11 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -20325,8 +24779,28 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, replica_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, 
remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + replica_id: int, + replica_instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(StatefulReplicaHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.kind = 'StatefulReplicaHealthReportExpired' # type: str self.replica_instance_id = replica_instance_id self.source_id = source_id self.property = property @@ -20336,7 +24810,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, rep self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'StatefulReplicaHealthReportExpired' class StatefulReplicaNewHealthReportEvent(ReplicaEvent): @@ -20344,31 +24817,48 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -20384,17 +24874,16 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -20409,11 +24898,11 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -20427,8 +24916,28 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, replica_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, 
remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + replica_id: int, + replica_instance_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(StatefulReplicaNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.kind = 'StatefulReplicaNewHealthReport' # type: str self.replica_instance_id = replica_instance_id self.source_id = source_id self.property = property @@ -20438,7 +24947,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, rep self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'StatefulReplicaNewHealthReport' class StatefulServiceDescription(ServiceDescription): @@ -20446,111 +24954,105 @@ class StatefulServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. 
+ :param service_name: Required. The full name of the service with 'fabric:' URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. - Initialization data is passed to service instances or replicas when they - are created. + :param initialization_data: The initialization data as an array of bytes. Initialization data + is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an - object. - :type partition_description: - ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an object. + :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. 
- :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost - property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. 
- :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param target_replica_set_size: Required. The target replica set size as a - number. + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param tags_required_to_place: Tags for placement of this service. + :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :param tags_required_to_run: Tags for running of this service. + :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription + :param target_replica_set_size: Required. The target replica set size as a number. :type target_replica_set_size: int - :param min_replica_set_size: Required. The minimum replica set size as a - number. + :param min_replica_set_size: Required. The minimum replica set size as a number. :type min_replica_set_size: int - :param has_persisted_state: Required. A flag indicating whether this is a - persistent service which stores states on the local disk. If it is then - the value of this property is true, if not it is false. + :param has_persisted_state: Required. A flag indicating whether this is a persistent service + which stores states on the local disk. If it is then the value of this property is true, if not + it is false. :type has_persisted_state: bool - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - QuorumLossWaitDuration (2) and StandByReplicaKeepDuration(4) are set. - - None - Does not indicate any other properties are set. The value is - zero. 
- - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 1. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 2. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 4. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 8. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 16. + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for QuorumLossWaitDuration (2) and + StandByReplicaKeepDuration(4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 1. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 2. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The + value is 4. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 8. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 16. :type flags: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, - between when a replica goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica + goes down and when a new replica is created. 
:type replica_restart_wait_duration_seconds: long - :param quorum_loss_wait_duration_seconds: The maximum duration, in - seconds, for which a partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a + partition is allowed to be in a state of quorum loss. :type quorum_loss_wait_duration_seconds: long - :param stand_by_replica_keep_duration_seconds: The definition on how long - StandBy replicas should be maintained before being removed. + :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas + should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: long - :param service_placement_time_limit_seconds: The duration for which - replicas can stay InBuild before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild + before reporting that build is stuck. :type service_placement_time_limit_seconds: long - :param drop_source_replica_on_move: Indicates whether to drop source - Secondary replica even if the target replica has not finished build. If - desired behavior is to drop it as soon as possible the value of this - property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if + the target replica has not finished build. If desired behavior is to drop it as soon as + possible the value of this property is true, if not it is false. :type drop_source_replica_on_move: bool + :param replica_lifecycle_description: Defines how replicas of this service will behave during + their lifecycle. 
+ :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { + 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, - 'service_kind': {'required': True}, 'target_replica_set_size': {'required': True, 'minimum': 1}, 'min_replica_set_size': {'required': True, 'minimum': 1}, 'has_persisted_state': {'required': True}, @@ -20561,6 +25063,7 @@ class StatefulServiceDescription(ServiceDescription): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -20575,7 +25078,8 @@ class StatefulServiceDescription(ServiceDescription): 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, + 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, @@ -20585,10 +25089,42 @@ class StatefulServiceDescription(ServiceDescription): 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'long'}, 'service_placement_time_limit_seconds': {'key': 'ServicePlacementTimeLimitSeconds', 'type': 'long'}, 'drop_source_replica_on_move': {'key': 'DropSourceReplicaOnMove', 'type': 'bool'}, - } - - def __init__(self, *, service_name: str, 
service_type_name: str, partition_description, target_replica_set_size: int, min_replica_set_size: int, has_persisted_state: bool, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, flags: int=None, replica_restart_wait_duration_seconds: int=None, quorum_loss_wait_duration_seconds: int=None, stand_by_replica_keep_duration_seconds: int=None, service_placement_time_limit_seconds: int=None, drop_source_replica_on_move: bool=None, **kwargs) -> None: - super(StatefulServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, **kwargs) + 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, + } + + def __init__( + self, + *, + service_name: str, + service_type_name: str, + partition_description: "PartitionSchemeDescription", + target_replica_set_size: int, + min_replica_set_size: int, + has_persisted_state: bool, + application_name: Optional[str] = None, + initialization_data: Optional[List[int]] = None, + placement_constraints: Optional[str] = None, + correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, + service_load_metrics: 
Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + default_move_cost: Optional[Union[str, "MoveCost"]] = None, + is_default_move_cost_specified: Optional[bool] = None, + service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, + service_dns_name: Optional[str] = None, + scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, + tags_required_to_place: Optional["NodeTagsDescription"] = None, + tags_required_to_run: Optional["NodeTagsDescription"] = None, + flags: Optional[int] = None, + replica_restart_wait_duration_seconds: Optional[int] = None, + quorum_loss_wait_duration_seconds: Optional[int] = None, + stand_by_replica_keep_duration_seconds: Optional[int] = None, + service_placement_time_limit_seconds: Optional[int] = None, + drop_source_replica_on_move: Optional[bool] = None, + replica_lifecycle_description: Optional["ReplicaLifecycleDescription"] = None, + **kwargs + ): + super(StatefulServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, tags_required_to_place=tags_required_to_place, tags_required_to_run=tags_required_to_run, **kwargs) + self.service_kind = 'Stateful' # type: str self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.has_persisted_state = has_persisted_state @@ -20598,7 
+25134,7 @@ def __init__(self, *, service_name: str, service_type_name: str, partition_descr self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds self.service_placement_time_limit_seconds = service_placement_time_limit_seconds self.drop_source_replica_on_move = drop_source_replica_on_move - self.service_kind = 'Stateful' + self.replica_lifecycle_description = replica_lifecycle_description class StatefulServiceInfo(ServiceInfo): @@ -20606,33 +25142,31 @@ class StatefulServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service - manifest. + :param type_name: Name of the service type as specified in the service manifest. 
:type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values - include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', - 'Failed' + :param service_status: The status of the application. Possible values include: "Unknown", + "Active", "Upgrading", "Deleting", "Creating", "Failed". :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Required. Constant filled by server. - :type service_kind: str :param has_persisted_state: Whether the service has persisted state. 
:type has_persisted_state: bool """ @@ -20643,20 +25177,32 @@ class StatefulServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, has_persisted_state: bool=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + type_name: Optional[str] = None, + manifest_version: Optional[str] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + service_status: Optional[Union[str, "ServiceStatus"]] = None, + is_service_group: Optional[bool] = None, + has_persisted_state: Optional[bool] = None, + **kwargs + ): super(StatefulServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group, **kwargs) + self.service_kind = 'Stateful' # type: str self.has_persisted_state = has_persisted_state - self.service_kind = 'Stateful' class StatefulServicePartitionInfo(ServicePartitionInfo): @@ -20664,35 +25210,31 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service - partition. Possible values include: 'Invalid', 'Ready', 'NotReady', - 'InQuorumLoss', 'Reconfiguring', 'Deleting' - :type partition_status: str or - ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, - partitioning scheme and keys supported by it. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :param partition_status: The status of the service fabric service partition. Possible values + include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". + :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, partitioning scheme and + keys supported by it. + :type partition_information: ~azure.servicefabric.models.PartitionInformation :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: long :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: long - :param last_quorum_loss_duration: The duration for which this partition - was in quorum loss. 
If the partition is currently in quorum loss, it - returns the duration since it has been in that state. This field is using - ISO8601 format for specifying the duration. - :type last_quorum_loss_duration: timedelta - :param primary_epoch: An Epoch is a configuration number for the partition - as a whole. When the configuration of the replica set changes, for example - when the Primary replica changes, the operations that are replicated from - the new Primary replica are said to be a new Epoch from the ones which - were sent by the old Primary replica. + :param last_quorum_loss_duration: The duration for which this partition was in quorum loss. If + the partition is currently in quorum loss, it returns the duration since it has been in that + state. This field is using ISO8601 format for specifying the duration. + :type last_quorum_loss_duration: ~datetime.timedelta + :param primary_epoch: An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary replica changes, the + operations that are replicated from the new Primary replica are said to be a new Epoch from the + ones which were sent by the old Primary replica. 
:type primary_epoch: ~azure.servicefabric.models.Epoch """ @@ -20701,60 +25243,67 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'long'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'long'}, 'last_quorum_loss_duration': {'key': 'LastQuorumLossDuration', 'type': 'duration'}, 'primary_epoch': {'key': 'PrimaryEpoch', 'type': 'Epoch'}, } - def __init__(self, *, health_state=None, partition_status=None, partition_information=None, target_replica_set_size: int=None, min_replica_set_size: int=None, last_quorum_loss_duration=None, primary_epoch=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + partition_status: Optional[Union[str, "ServicePartitionStatus"]] = None, + partition_information: Optional["PartitionInformation"] = None, + target_replica_set_size: Optional[int] = None, + min_replica_set_size: Optional[int] = None, + last_quorum_loss_duration: Optional[datetime.timedelta] = None, + primary_epoch: Optional["Epoch"] = None, + **kwargs + ): super(StatefulServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information, **kwargs) + self.service_kind = 'Stateful' # type: str self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.last_quorum_loss_duration = last_quorum_loss_duration self.primary_epoch = primary_epoch - self.service_kind = 'Stateful' class StatefulServiceReplicaHealth(ReplicaHealth): """Represents the health of the stateful 
service replica. - Contains the replica aggregated health state, the health events and the - unhealthy evaluations. +Contains the replica aggregated health state, the health events and the unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. 
:type replica_id: str """ @@ -20767,41 +25316,46 @@ class StatefulServiceReplicaHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + partition_id: Optional[str] = None, + replica_id: Optional[str] = None, + **kwargs + ): super(StatefulServiceReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, partition_id=partition_id, **kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = replica_id - self.service_kind = 'Stateful' class StatefulServiceReplicaHealthState(ReplicaHealthState): - """Represents the health state of the stateful service replica, which contains - the replica ID and the aggregated health state. + """Represents the health state of the stateful service replica, which contains the replica ID and the aggregated health state. All required parameters must be populated in order to send to Azure. 
- :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: The ID of the partition to which this replica - belongs. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. 
Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str """ @@ -20811,52 +25365,53 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + partition_id: Optional[str] = None, + replica_id: Optional[str] = None, + **kwargs + ): super(StatefulServiceReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, partition_id=partition_id, **kwargs) + self.service_kind = 'Stateful' # type: str self.replica_id = replica_id - self.service_kind = 'Stateful' class StatefulServiceReplicaInfo(ReplicaInfo): - """Represents a stateful service replica. This includes information about the - identity, role, status, health, node name, uptime, and other details about - the replica. + """Represents a stateful service replica. This includes information about the identity, role, status, health, node name, uptime, and other details about the replica. All required parameters must be populated in order to send to Azure. - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param replica_status: The status of a replica of a service. 
Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of - the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_role: The role of a replica of a stateful service. Possible - values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', - 'ActiveSecondary' + :param replica_role: The role of a replica of a stateful service. Possible values include: + "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param replica_id: Id of a stateful service replica. ReplicaId is used by - Service Fabric to uniquely identify a replica of a partition. It is unique - within a partition and does not change for the lifetime of the replica. If - a replica gets dropped and another replica gets created on the same node - for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a - replica id. 
+ :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to + uniquely identify a replica of a partition. It is unique within a partition and does not change + for the lifetime of the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for the id. Sometimes the + id of a stateless service instance is also referred as a replica id. :type replica_id: str """ @@ -20865,55 +25420,61 @@ class StatefulServiceReplicaInfo(ReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, replica_role=None, replica_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + replica_status: Optional[Union[str, "ReplicaStatus"]] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + address: Optional[str] = None, + last_in_build_duration_in_seconds: Optional[str] = None, + replica_role: Optional[Union[str, "ReplicaRole"]] = None, + replica_id: Optional[str] = None, + **kwargs + ): super(StatefulServiceReplicaInfo, self).__init__(replica_status=replica_status, health_state=health_state, node_name=node_name, address=address, last_in_build_duration_in_seconds=last_in_build_duration_in_seconds, **kwargs) + self.service_kind = 'Stateful' # type: str self.replica_role = 
replica_role self.replica_id = replica_id - self.service_kind = 'Stateful' class StatefulServiceTypeDescription(ServiceTypeDescription): - """Describes a stateful service type defined in the service manifest of a - provisioned application type. + """Describes a stateful service type defined in the service manifest of a provisioned application type. All required parameters must be populated in order to send to Azure. - :param is_stateful: Indicates whether the service type is a stateful - service type or a stateless service type. This property is true if the - service type is a stateful service type, false otherwise. + :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. + Possible values include: "Invalid", "Stateless", "Stateful". + :type kind: str or ~azure.servicefabric.models.ServiceKind + :param is_stateful: Indicates whether the service type is a stateful service type or a + stateless service type. This property is true if the service type is a stateful service type, + false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when - instantiating this service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when instantiating this + service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy - descriptions. 
+ :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: - list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Required. Constant filled by server. - :type kind: str - :param has_persisted_state: A flag indicating whether this is a persistent - service which stores states on the local disk. If it is then the value of - this property is true, if not it is false. + :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param has_persisted_state: A flag indicating whether this is a persistent service which stores + states on the local disk. If it is then the value of this property is true, if not it is false. :type has_persisted_state: bool """ @@ -20922,20 +25483,31 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, has_persisted_state: bool=None, **kwargs) -> None: + def __init__( + self, + *, + 
is_stateful: Optional[bool] = None, + service_type_name: Optional[str] = None, + placement_constraints: Optional[str] = None, + load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + extensions: Optional[List["ServiceTypeExtensionDescription"]] = None, + has_persisted_state: Optional[bool] = None, + **kwargs + ): super(StatefulServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, load_metrics=load_metrics, service_placement_policies=service_placement_policies, extensions=extensions, **kwargs) + self.kind = 'Stateful' # type: str self.has_persisted_state = has_persisted_state - self.kind = 'Stateful' class StatefulServiceUpdateDescription(ServiceUpdateDescription): @@ -20943,93 +25515,96 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. - - None - Does not indicate any other properties are set. The value is - zero. - - TargetReplicaSetSize/InstanceCount - Indicates whether the - TargetReplicaSetSize property (for Stateful services) or the InstanceCount - property (for Stateless services) is set. The value is 1. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 2. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 4. 
- - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. The value is 8. - - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The - value is 16. - - PlacementConstraints - Indicates the PlacementConstraints property is - set. The value is 32. - - PlacementPolicyList - Indicates the ServicePlacementPolicies property is - set. The value is 64. - - Correlation - Indicates the CorrelationScheme property is set. The value - is 128. - - Metrics - Indicates the ServiceLoadMetrics property is set. The value is - 256. - - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The - value is 512. - - ScalingPolicy - Indicates the ScalingPolicies property is set. The value - is 1024. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 2048. - - MinInstanceCount - Indicates the MinInstanceCount property is set. The - value is 4096. - - MinInstancePercentage - Indicates the MinInstancePercentage property is - set. The value is 8192. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 16384. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 32768. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and + QuorumLossWaitDuration (4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. 
+ * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property + (for Stateful services) or the InstanceCount property (for Stateless services) is set. The + value is 1. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 2. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 4. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The + value is 8. + * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. + * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. + * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is + 64. + * Correlation - Indicates the CorrelationScheme property is set. The value is 128. + * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. + * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 2048. + * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. + * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is + 8192. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 16384. + * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 32768. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 65536. + * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. + * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. 
+ * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_dns_name: The DNS name of the service. + :type service_dns_name: str + :param tags_for_placement: Tags for placement of this service. + :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription + :param tags_for_running: Tags for running of this service. + :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: int :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, - between when a replica goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica + goes down and when a new replica is created. :type replica_restart_wait_duration_seconds: str - :param quorum_loss_wait_duration_seconds: The maximum duration, in - seconds, for which a partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a + partition is allowed to be in a state of quorum loss. :type quorum_loss_wait_duration_seconds: str - :param stand_by_replica_keep_duration_seconds: The definition on how long - StandBy replicas should be maintained before being removed. + :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas + should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: str - :param service_placement_time_limit_seconds: The duration for which - replicas can stay InBuild before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild + before reporting that build is stuck. 
:type service_placement_time_limit_seconds: str - :param drop_source_replica_on_move: Indicates whether to drop source - Secondary replica even if the target replica has not finished build. If - desired behavior is to drop it as soon as possible the value of this - property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if + the target replica has not finished build. If desired behavior is to drop it as soon as + possible the value of this property is true, if not it is false. :type drop_source_replica_on_move: bool + :param replica_lifecycle_description: Defines how replicas of this service will behave during + their lifecycle. + :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { @@ -21039,6 +25614,7 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -21046,7 +25622,9 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, + 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 
'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'str'}, @@ -21054,10 +25632,34 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'str'}, 'service_placement_time_limit_seconds': {'key': 'ServicePlacementTimeLimitSeconds', 'type': 'str'}, 'drop_source_replica_on_move': {'key': 'DropSourceReplicaOnMove', 'type': 'bool'}, - } - - def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, target_replica_set_size: int=None, min_replica_set_size: int=None, replica_restart_wait_duration_seconds: str=None, quorum_loss_wait_duration_seconds: str=None, stand_by_replica_keep_duration_seconds: str=None, service_placement_time_limit_seconds: str=None, drop_source_replica_on_move: bool=None, **kwargs) -> None: - super(StatefulServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, **kwargs) + 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, + } + + def __init__( + self, + *, + flags: Optional[str] = None, + placement_constraints: Optional[str] = None, + correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, + load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + default_move_cost: Optional[Union[str, "MoveCost"]] = None, + scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, + service_dns_name: Optional[str] = None, + tags_for_placement: 
Optional["NodeTagsDescription"] = None, + tags_for_running: Optional["NodeTagsDescription"] = None, + target_replica_set_size: Optional[int] = None, + min_replica_set_size: Optional[int] = None, + replica_restart_wait_duration_seconds: Optional[str] = None, + quorum_loss_wait_duration_seconds: Optional[str] = None, + stand_by_replica_keep_duration_seconds: Optional[str] = None, + service_placement_time_limit_seconds: Optional[str] = None, + drop_source_replica_on_move: Optional[bool] = None, + replica_lifecycle_description: Optional["ReplicaLifecycleDescription"] = None, + **kwargs + ): + super(StatefulServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, service_dns_name=service_dns_name, tags_for_placement=tags_for_placement, tags_for_running=tags_for_running, **kwargs) + self.service_kind = 'Stateful' # type: str self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds @@ -21065,7 +25667,7 @@ def __init__(self, *, flags: str=None, placement_constraints: str=None, correlat self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds self.service_placement_time_limit_seconds = service_placement_time_limit_seconds self.drop_source_replica_on_move = drop_source_replica_on_move - self.service_kind = 'Stateful' + self.replica_lifecycle_description = replica_lifecycle_description class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -21073,31 +25675,48 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. 
+ :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. 
:type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. 
If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -21111,17 +25730,16 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -21135,11 +25753,11 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -21152,8 +25770,27 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, source_id: str, property: str, health_state: 
str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + replica_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(StatelessReplicaHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.kind = 'StatelessReplicaHealthReportExpired' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -21162,7 +25799,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, rep self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'StatelessReplicaHealthReportExpired' class StatelessReplicaNewHealthReportEvent(ReplicaEvent): @@ -21170,31 +25806,48 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param event_instance_id: Required. The identifier for the FabricEvent - instance. + :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values + include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", + "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", + "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", + "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", + "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", + "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", + "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", + "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", + "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", + "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", + "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", + "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", + "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", + "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", + "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", + "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", + "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", + "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", + "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", + "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", + "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", + "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". + :type kind: str or ~azure.servicefabric.models.FabricEventKind + :param event_instance_id: Required. The identifier for the FabricEvent instance. :type event_instance_id: str :param category: The category of event. 
:type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: datetime - :param has_correlated_events: Shows there is existing related events - available. + :type time_stamp: ~datetime.datetime + :param has_correlated_events: Shows there is existing related events available. :type has_correlated_events: bool - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Required. An internal ID used by Service Fabric to - uniquely identify a partition. This is a randomly generated GUID when the - service was created. The partition ID is unique and does not change for - the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. The partition ID is + unique and does not change for the lifetime of the service. If the same service was deleted and + recreated the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId - is used by Service Fabric to uniquely identify a replica of a partition. - It is unique within a partition and does not change for the lifetime of - the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for - the id. Sometimes the id of a stateless service instance is also referred - as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service + Fabric to uniquely identify a replica of a partition. It is unique within a partition and does + not change for the lifetime of the replica. If a replica gets dropped and another replica gets + created on the same node for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -21208,17 +25861,16 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it - expires. + :param remove_when_expired: Required. Indicates the removal when it expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: datetime + :type source_utc_timestamp: ~datetime.datetime """ _validation = { + 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, - 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -21232,11 +25884,11 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -21249,8 +25901,27 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, 
has_correlated_events: bool=None, **kwargs) -> None: + def __init__( + self, + *, + event_instance_id: str, + time_stamp: datetime.datetime, + partition_id: str, + replica_id: int, + source_id: str, + property: str, + health_state: str, + time_to_live_ms: int, + sequence_number: int, + description: str, + remove_when_expired: bool, + source_utc_timestamp: datetime.datetime, + category: Optional[str] = None, + has_correlated_events: Optional[bool] = None, + **kwargs + ): super(StatelessReplicaNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.kind = 'StatelessReplicaNewHealthReport' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -21259,7 +25930,6 @@ def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, rep self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp - self.kind = 'StatelessReplicaNewHealthReport' class StatelessServiceDescription(ServiceDescription): @@ -21267,121 +25937,128 @@ class StatelessServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' - URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' URI scheme. 
:type service_name: str - :param service_type_name: Required. Name of the service type as specified - in the service manifest. + :param service_type_name: Required. Name of the service type as specified in the service + manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. - Initialization data is passed to service instances or replicas when they - are created. + :param initialization_data: The initialization data as an array of bytes. Initialization data + is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an - object. - :type partition_description: - ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an object. + :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. 
- :type service_load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost - property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service - package to be used for a service. Possible values include: - 'SharedProcess', 'ExclusiveProcess' + :param service_package_activation_mode: The activation mode of service package to be used for a + service. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS - system service to be enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS system service to be + enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param tags_required_to_place: Tags for placement of this service. + :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :param tags_required_to_run: Tags for running of this service. + :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription :param instance_count: Required. The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of - instances that must be up to meet the EnsureAvailability safety check - during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation - -1 is first converted into the number of nodes on which the instances are - allowed to be placed according to the placement constraints on the - service. + :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up + to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted + into the number of nodes on which the instances are allowed to be placed according to the + placement constraints on the service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum - percentage of InstanceCount that must be up to meet the EnsureAvailability - safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). 
- Note, if InstanceCount is set to -1, during MinInstancePercentage - computation, -1 is first converted into the number of nodes on which the - instances are allowed to be placed according to the placement constraints - on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum percentage of + InstanceCount that must be up to meet the EnsureAvailability safety check during operations + like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first + converted into the number of nodes on which the instances are allowed to be placed according to + the placement constraints on the service. :type min_instance_percentage: int - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 1 then the flags for - InstanceCloseDelayDuration is set. - - None - Does not indicate any other properties are set. The value is - zero. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 1. + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 1 then the flags for InstanceCloseDelayDuration is set. + + + * None - Does not indicate any other properties are set. The value is zero. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 1. 
+ * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDurationSeconds property is + set. The value is 2. :type flags: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait - before a stateless instance is closed, to allow the active requests to - drain gracefully. This would be effective when the instance is closing - during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the - delay, which prevents new connections to this instance. + :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless + instance is closed, to allow the active requests to drain gracefully. This would be effective + when the instance is closing during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the delay, which prevents + new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future - requests. - Note, the default value of InstanceCloseDelayDuration is 0, which - indicates that there won't be any delay or removal of the endpoint prior - to closing the instance. + + .. code-block:: + + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future requests. 
+ + Note, the default value of InstanceCloseDelayDuration is 0, which indicates that there won't + be any delay or removal of the endpoint prior to closing the instance. :type instance_close_delay_duration_seconds: long + :param instance_lifecycle_description: Defines how instances of this service will behave during + their lifecycle. + :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer + starts. When it expires Service Fabric will create a new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in situations where the + instance going down is likely to recover in a short time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes down, Service Fabric + will immediately start building its replacement. + :type instance_restart_wait_duration_seconds: long """ _validation = { + 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, - 'service_kind': {'required': True}, 'instance_count': {'required': True, 'minimum': -1}, + 'min_instance_count': {'minimum': 1}, + 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, 'instance_close_delay_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, + 'instance_restart_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -21396,22 +26073,54 @@ class StatelessServiceDescription(ServiceDescription): 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 
'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, + 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, 'flags': {'key': 'Flags', 'type': 'int'}, 'instance_close_delay_duration_seconds': {'key': 'InstanceCloseDelayDurationSeconds', 'type': 'long'}, - } - - def __init__(self, *, service_name: str, service_type_name: str, partition_description, instance_count: int, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, min_instance_count: int=None, min_instance_percentage: int=None, flags: int=None, instance_close_delay_duration_seconds: int=None, **kwargs) -> None: - super(StatelessServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, **kwargs) + 
'instance_lifecycle_description': {'key': 'InstanceLifecycleDescription', 'type': 'InstanceLifecycleDescription'}, + 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'long'}, + } + + def __init__( + self, + *, + service_name: str, + service_type_name: str, + partition_description: "PartitionSchemeDescription", + instance_count: int, + application_name: Optional[str] = None, + initialization_data: Optional[List[int]] = None, + placement_constraints: Optional[str] = None, + correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, + service_load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + default_move_cost: Optional[Union[str, "MoveCost"]] = None, + is_default_move_cost_specified: Optional[bool] = None, + service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, + service_dns_name: Optional[str] = None, + scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, + tags_required_to_place: Optional["NodeTagsDescription"] = None, + tags_required_to_run: Optional["NodeTagsDescription"] = None, + min_instance_count: Optional[int] = 1, + min_instance_percentage: Optional[int] = 0, + flags: Optional[int] = None, + instance_close_delay_duration_seconds: Optional[int] = None, + instance_lifecycle_description: Optional["InstanceLifecycleDescription"] = None, + instance_restart_wait_duration_seconds: Optional[int] = None, + **kwargs + ): + super(StatelessServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, 
default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, tags_required_to_place=tags_required_to_place, tags_required_to_run=tags_required_to_run, **kwargs) + self.service_kind = 'Stateless' # type: str self.instance_count = instance_count self.min_instance_count = min_instance_count self.min_instance_percentage = min_instance_percentage self.flags = flags self.instance_close_delay_duration_seconds = instance_close_delay_duration_seconds - self.service_kind = 'Stateless' + self.instance_lifecycle_description = instance_lifecycle_description + self.instance_restart_wait_duration_seconds = instance_restart_wait_duration_seconds class StatelessServiceInfo(ServiceInfo): @@ -21419,33 +26128,31 @@ class StatelessServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded - representation of the service name. This is used in the REST APIs to - identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "\\~" - character. For example, if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1\\~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. + :param id: The identity of the service. This ID is an encoded representation of the service + name. This is used in the REST APIs to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, + if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous + versions. :type id: str + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. 
Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service - manifest. + :param type_name: Name of the service type as specified in the service manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values - include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', - 'Failed' + :param service_status: The status of the application. Possible values include: "Unknown", + "Active", "Upgrading", "Deleting", "Creating", "Failed". :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str """ _validation = { @@ -21454,53 +26161,61 @@ class StatelessServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } - def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, **kwargs) -> None: + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + type_name: Optional[str] = None, + manifest_version: Optional[str] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + service_status: Optional[Union[str, "ServiceStatus"]] = None, + is_service_group: Optional[bool] = None, + **kwargs + ): super(StatelessServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group, **kwargs) - self.service_kind = 'Stateless' + self.service_kind = 'Stateless' # type: str class StatelessServiceInstanceHealth(ReplicaHealth): """Represents the health of the stateless service instance. - Contains the instance aggregated health state, the health events and the - unhealthy evaluations. +Contains the instance aggregated health state, the health events and the unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the - aggregated health state of the entity computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the - entity and its children (if any). - The aggregation is done by applying the desired health policy. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the aggregated health state of the + entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the entity and its + children (if any). + The aggregation is done by applying the desired health policy. Possible values include: + "Invalid", "Ok", "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the - current aggregated health state was returned by Health Manager. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children - types of the queried entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated + health state was returned by Health Manager. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children types of the queried + entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs.
:type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. :type instance_id: str """ @@ -21513,36 +26228,43 @@ class StatelessServiceInstanceHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, instance_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + health_events: Optional[List["HealthEvent"]] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + health_statistics: Optional["HealthStatistics"] = None, + partition_id: Optional[str] = None, + instance_id: Optional[str] = None, + **kwargs + ): 
super(StatelessServiceInstanceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, partition_id=partition_id, **kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = instance_id - self.service_kind = 'Stateless' class StatelessServiceInstanceHealthState(ReplicaHealthState): - """Represents the health state of the stateless service instance, which - contains the instance ID and the aggregated health state. + """Represents the health state of the stateless service instance, which contains the instance ID and the aggregated health state. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param partition_id: The ID of the partition to which this replica - belongs. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param replica_id: Id of the stateless service instance on the wire this - field is called ReplicaId. 
+ :param replica_id: Id of the stateless service instance on the wire this field is called + ReplicaId. :type replica_id: str """ @@ -21552,46 +26274,49 @@ class StatelessServiceInstanceHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, *, aggregated_health_state=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + partition_id: Optional[str] = None, + replica_id: Optional[str] = None, + **kwargs + ): super(StatelessServiceInstanceHealthState, self).__init__(aggregated_health_state=aggregated_health_state, partition_id=partition_id, **kwargs) + self.service_kind = 'Stateless' # type: str self.replica_id = replica_id - self.service_kind = 'Stateless' class StatelessServiceInstanceInfo(ReplicaInfo): - """Represents a stateless service instance. This includes information about - the identity, status, health, node name, uptime, and other details about - the instance. + """Represents a stateless service instance. This includes information about the identity, status, health, node name, uptime, and other details about the instance. All required parameters must be populated in order to send to Azure. - :param replica_status: The status of a replica of a service. Possible - values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', - 'Dropped' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". 
+ :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param replica_status: The status of a replica of a service. Possible values include: + "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of - the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Required. Constant filled by server. - :type service_kind: str - :param instance_id: Id of a stateless service instance. InstanceId is used - by Service Fabric to uniquely identify an instance of a partition of a - stateless service. It is unique within a partition and does not change for - the lifetime of the instance. If the instance has failed over on the same - or different node, it will get a different value for the InstanceId. + :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to + uniquely identify an instance of a partition of a stateless service. It is unique within a + partition and does not change for the lifetime of the instance. 
If the instance has failed over + on the same or different node, it will get a different value for the InstanceId. :type instance_id: str """ @@ -21600,19 +26325,29 @@ class StatelessServiceInstanceInfo(ReplicaInfo): } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, instance_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + replica_status: Optional[Union[str, "ReplicaStatus"]] = None, + health_state: Optional[Union[str, "HealthState"]] = None, + node_name: Optional[str] = None, + address: Optional[str] = None, + last_in_build_duration_in_seconds: Optional[str] = None, + instance_id: Optional[str] = None, + **kwargs + ): super(StatelessServiceInstanceInfo, self).__init__(replica_status=replica_status, health_state=health_state, node_name=node_name, address=address, last_in_build_duration_in_seconds=last_in_build_duration_in_seconds, **kwargs) + self.service_kind = 'Stateless' # type: str self.instance_id = instance_id - self.service_kind = 'Stateless' class StatelessServicePartitionInfo(ServicePartitionInfo): @@ -21620,100 +26355,102 @@ class StatelessServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by + server. Possible values include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, + Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service - partition. Possible values include: 'Invalid', 'Ready', 'NotReady', - 'InQuorumLoss', 'Reconfiguring', 'Deleting' - :type partition_status: str or - ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, - partitioning scheme and keys supported by it. - :type partition_information: - ~azure.servicefabric.models.PartitionInformation - :param service_kind: Required. Constant filled by server. - :type service_kind: str + :param partition_status: The status of the service fabric service partition. Possible values + include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". + :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, partitioning scheme and + keys supported by it. + :type partition_information: ~azure.servicefabric.models.PartitionInformation :param instance_count: Number of instances of this partition. :type instance_count: long - :param min_instance_count: MinInstanceCount is the minimum number of - instances that must be up to meet the EnsureAvailability safety check - during operations like upgrade or deactivate node. 
- The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation - -1 is first converted into the number of nodes on which the instances are - allowed to be placed according to the placement constraints on the - service. + :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up + to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted + into the number of nodes on which the instances are allowed to be placed according to the + placement constraints on the service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum - percentage of InstanceCount that must be up to meet the EnsureAvailability - safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage - computation, -1 is first converted into the number of nodes on which the - instances are allowed to be placed according to the placement constraints - on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum percentage of + InstanceCount that must be up to meet the EnsureAvailability safety check during operations + like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). 
+ Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first + converted into the number of nodes on which the instances are allowed to be placed according to + the placement constraints on the service. :type min_instance_percentage: int """ _validation = { 'service_kind': {'required': True}, + 'min_instance_count': {'minimum': 1}, + 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'long'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, } - def __init__(self, *, health_state=None, partition_status=None, partition_information=None, instance_count: int=None, min_instance_count: int=None, min_instance_percentage: int=None, **kwargs) -> None: + def __init__( + self, + *, + health_state: Optional[Union[str, "HealthState"]] = None, + partition_status: Optional[Union[str, "ServicePartitionStatus"]] = None, + partition_information: Optional["PartitionInformation"] = None, + instance_count: Optional[int] = None, + min_instance_count: Optional[int] = 1, + min_instance_percentage: Optional[int] = 0, + **kwargs + ): super(StatelessServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information, **kwargs) + self.service_kind = 'Stateless' # type: str self.instance_count = instance_count self.min_instance_count = min_instance_count self.min_instance_percentage = min_instance_percentage - self.service_kind = 'Stateless' class StatelessServiceTypeDescription(ServiceTypeDescription): 
- """Describes a stateless service type defined in the service manifest of a - provisioned application type. + """Describes a stateless service type defined in the service manifest of a provisioned application type. All required parameters must be populated in order to send to Azure. - :param is_stateful: Indicates whether the service type is a stateful - service type or a stateless service type. This property is true if the - service type is a stateful service type, false otherwise. + :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. + Possible values include: "Invalid", "Stateless", "Stateful". + :type kind: str or ~azure.servicefabric.models.ServiceKind + :param is_stateful: Indicates whether the service type is a stateful service type or a + stateless service type. This property is true if the service type is a stateful service type, + false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when - instantiating this service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when instantiating this + service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy - descriptions. + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy descriptions. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: - list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Required. Constant filled by server. - :type kind: str - :param use_implicit_host: A flag indicating if this type is not - implemented and hosted by a user service process, but is implicitly hosted - by a system created process. This value is true for services using the - guest executable services, false otherwise. + :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param use_implicit_host: A flag indicating if this type is not implemented and hosted by a + user service process, but is implicitly hosted by a system created process. This value is true + for services using the guest executable services, false otherwise. :type use_implicit_host: bool """ @@ -21722,20 +26459,31 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'use_implicit_host': {'key': 'UseImplicitHost', 'type': 'bool'}, } - def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, use_implicit_host: bool=None, **kwargs) -> None: + def __init__( + self, + *, + is_stateful: Optional[bool] = None, + service_type_name: 
Optional[str] = None, + placement_constraints: Optional[str] = None, + load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + extensions: Optional[List["ServiceTypeExtensionDescription"]] = None, + use_implicit_host: Optional[bool] = None, + **kwargs + ): super(StatelessServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, load_metrics=load_metrics, service_placement_policies=service_placement_policies, extensions=extensions, **kwargs) + self.kind = 'Stateless' # type: str self.use_implicit_host = use_implicit_host - self.kind = 'Stateless' class StatelessServiceUpdateDescription(ServiceUpdateDescription): @@ -21743,117 +26491,129 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param flags: Flags indicating whether other properties are set. Each of - the associated properties corresponds to a flag, specified below, which, - if set, indicate that the property is specified. - This property can be a combination of those flags obtained using bitwise - 'OR' operator. - For example, if the provided value is 6 then the flags for - ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. - - None - Does not indicate any other properties are set. The value is - zero. - - TargetReplicaSetSize/InstanceCount - Indicates whether the - TargetReplicaSetSize property (for Stateful services) or the InstanceCount - property (for Stateless services) is set. The value is 1. - - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration - property is set. The value is 2. - - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property - is set. The value is 4. - - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration - property is set. 
The value is 8. - - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The - value is 16. - - PlacementConstraints - Indicates the PlacementConstraints property is - set. The value is 32. - - PlacementPolicyList - Indicates the ServicePlacementPolicies property is - set. The value is 64. - - Correlation - Indicates the CorrelationScheme property is set. The value - is 128. - - Metrics - Indicates the ServiceLoadMetrics property is set. The value is - 256. - - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The - value is 512. - - ScalingPolicy - Indicates the ScalingPolicies property is set. The value - is 1024. - - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit - property is set. The value is 2048. - - MinInstanceCount - Indicates the MinInstanceCount property is set. The - value is 4096. - - MinInstancePercentage - Indicates the MinInstancePercentage property is - set. The value is 8192. - - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration - property is set. The value is 16384. - - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property - is set. The value is 32768. + :param service_kind: Required. The service kind.Constant filled by server. Possible values + include: "Invalid", "Stateless", "Stateful". + :type service_kind: str or ~azure.servicefabric.models.ServiceKind + :param flags: Flags indicating whether other properties are set. Each of the associated + properties corresponds to a flag, specified below, which, if set, indicate that the property is + specified. + This property can be a combination of those flags obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and + QuorumLossWaitDuration (4) are set. + + + * None - Does not indicate any other properties are set. The value is zero. 
+ * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property + (for Stateful services) or the InstanceCount property (for Stateless services) is set. The + value is 1. + * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The + value is 2. + * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is + 4. + * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The + value is 8. + * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. + * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. + * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is + 64. + * Correlation - Indicates the CorrelationScheme property is set. The value is 128. + * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. + * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. + * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The + value is 2048. + * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. + * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is + 8192. + * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The + value is 16384. + * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDuration property is set. The + value is 32768. + * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value + is 65536. + * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. + * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576.
+ * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. - Placement constraints are boolean expressions on node properties and allow - for restricting a service to particular nodes based on the service - requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. Placement constraints are + boolean expressions on node properties and allow for restricting a service to particular nodes + based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: - list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: - list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values - include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' + :param default_move_cost: The move cost for the service. Possible values include: "Zero", + "Low", "Medium", "High", "VeryHigh". :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: - list[~azure.servicefabric.models.ScalingPolicyDescription] - :param service_kind: Required. Constant filled by server. 
- :type service_kind: str + :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_dns_name: The DNS name of the service. + :type service_dns_name: str + :param tags_for_placement: Tags for placement of this service. + :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription + :param tags_for_running: Tags for running of this service. + :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription :param instance_count: The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of - instances that must be up to meet the EnsureAvailability safety check - during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation - -1 is first converted into the number of nodes on which the instances are - allowed to be placed according to the placement constraints on the - service. + :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up + to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted + into the number of nodes on which the instances are allowed to be placed according to the + placement constraints on the service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum - percentage of InstanceCount that must be up to meet the EnsureAvailability - safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( - MinInstancePercentage/100.0 * InstanceCount) ). 
- Note, if InstanceCount is set to -1, during MinInstancePercentage - computation, -1 is first converted into the number of nodes on which the - instances are allowed to be placed according to the placement constraints - on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum percentage of + InstanceCount that must be up to meet the EnsureAvailability safety check during operations + like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * + InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first + converted into the number of nodes on which the instances are allowed to be placed according to + the placement constraints on the service. :type min_instance_percentage: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait - before a stateless instance is closed, to allow the active requests to - drain gracefully. This would be effective when the instance is closing - during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the - delay, which prevents new connections to this instance. + :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless + instance is closed, to allow the active requests to drain gracefully. This would be effective + when the instance is closing during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the delay, which prevents + new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - Stop sending new requests to this instance. 
- - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future - requests. + + .. code-block:: + + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future requests. :type instance_close_delay_duration_seconds: str + :param instance_lifecycle_description: Defines how instances of this service will behave during + their lifecycle. + :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer + starts. When it expires Service Fabric will create a new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in situations where the + instance going down is likely to recover in a short time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes down, Service Fabric + will immediately start building its replacement. 
+ :type instance_restart_wait_duration_seconds: str """ _validation = { 'service_kind': {'required': True}, 'instance_count': {'minimum': -1}, + 'min_instance_count': {'minimum': 1}, + 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -21861,53 +26621,85 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, + 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, 'instance_close_delay_duration_seconds': {'key': 'InstanceCloseDelayDurationSeconds', 'type': 'str'}, - } - - def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, instance_count: int=None, min_instance_count: int=None, min_instance_percentage: int=None, instance_close_delay_duration_seconds: str=None, **kwargs) -> None: - super(StatelessServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, 
load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, **kwargs) + 'instance_lifecycle_description': {'key': 'InstanceLifecycleDescription', 'type': 'InstanceLifecycleDescription'}, + 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'str'}, + } + + def __init__( + self, + *, + flags: Optional[str] = None, + placement_constraints: Optional[str] = None, + correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, + load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, + service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, + default_move_cost: Optional[Union[str, "MoveCost"]] = None, + scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, + service_dns_name: Optional[str] = None, + tags_for_placement: Optional["NodeTagsDescription"] = None, + tags_for_running: Optional["NodeTagsDescription"] = None, + instance_count: Optional[int] = None, + min_instance_count: Optional[int] = 1, + min_instance_percentage: Optional[int] = 0, + instance_close_delay_duration_seconds: Optional[str] = None, + instance_lifecycle_description: Optional["InstanceLifecycleDescription"] = None, + instance_restart_wait_duration_seconds: Optional[str] = None, + **kwargs + ): + super(StatelessServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, service_dns_name=service_dns_name, tags_for_placement=tags_for_placement, tags_for_running=tags_for_running, **kwargs) + self.service_kind = 'Stateless' # type: str self.instance_count = instance_count self.min_instance_count = min_instance_count self.min_instance_percentage = min_instance_percentage 
self.instance_close_delay_duration_seconds = instance_close_delay_duration_seconds - self.service_kind = 'Stateless' + self.instance_lifecycle_description = instance_lifecycle_description + self.instance_restart_wait_duration_seconds = instance_restart_wait_duration_seconds class StoppedChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos stops because either - the user issued a stop or the time to run was up. + """Describes a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why Chaos stopped. Chaos can stop because of - StopChaos API call or the timeToRun provided in ChaosParameters is over. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why Chaos stopped. Chaos can stop because of StopChaos API call or the + timeToRun provided in ChaosParameters is over. 
:type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + reason: Optional[str] = None, + **kwargs + ): super(StoppedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.kind = 'Stopped' # type: str self.reason = reason - self.kind = 'Stopped' class StringPropertyValue(PropertyValue): @@ -21915,8 +26707,10 @@ class StringPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str + :param kind: Required. The kind of property, determined by the type of data. Following are the + possible values.Constant filled by server. Possible values include: "Invalid", "Binary", + "Int64", "Double", "String", "Guid". + :type kind: str or ~azure.servicefabric.models.PropertyValueKind :param data: Required. The data of the property value. :type data: str """ @@ -21931,24 +26725,30 @@ class StringPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, *, data: str, **kwargs) -> None: + def __init__( + self, + *, + data: str, + **kwargs + ): super(StringPropertyValue, self).__init__(**kwargs) + self.kind = 'String' # type: str self.data = data - self.kind = 'String' class SuccessfulPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch succeeding. - Contains the results of any "Get" operations in the batch. + """Derived from PropertyBatchInfo. Represents the property batch succeeding. 
Contains the results of any "Get" operations in the batch. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param properties: A map containing the properties that were requested - through any "Get" property batch operations. The key represents the index - of the "Get" operation in the original request, in string form. The value - is the property. If a property is not found, it will not be in the map. + :param kind: Required. The kind of property batch info, determined by the results of a property + batch. The following are the possible values.Constant filled by server. Possible values + include: "Invalid", "Successful", "Failed". + :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind + :param properties: A map containing the properties that were requested through any "Get" + property batch operations. The key represents the index of the "Get" operation in the original + request, in string form. The value is the property. If a property is not found, it will not be + in the map. :type properties: dict[str, ~azure.servicefabric.models.PropertyInfo] """ @@ -21961,37 +26761,43 @@ class SuccessfulPropertyBatchInfo(PropertyBatchInfo): 'properties': {'key': 'Properties', 'type': '{PropertyInfo}'}, } - def __init__(self, *, properties=None, **kwargs) -> None: + def __init__( + self, + *, + properties: Optional[Dict[str, "PropertyInfo"]] = None, + **kwargs + ): super(SuccessfulPropertyBatchInfo, self).__init__(**kwargs) + self.kind = 'Successful' # type: str self.properties = properties - self.kind = 'Successful' class SystemApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the fabric:/System application, containing - information about the data and the algorithm used by health store to - evaluate health. The evaluation is returned only when the aggregated health - state of the cluster is either Error or Warning. 
- - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for the fabric:/System application, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state of the cluster is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". 
+ :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the current aggregated health state of the system application. The types - of the unhealthy evaluations can be DeployedApplicationsHealthEvaluation, - ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated + health state of the system application. The types of the unhealthy evaluations can be + DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -21999,30 +26805,36 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(SystemApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'SystemApplication' # type: str 
self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'SystemApplication' -class TcpConfig(Model): +class TcpConfig(msrest.serialization.Model): """Describes the tcp configuration for external connectivity for this network. All required parameters must be populated in order to send to Azure. :param name: Required. tcp gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint - below needs to be exposed. + :param port: Required. Specifies the port at which the service endpoint below needs to be + exposed. :type port: int - :param destination: Required. Describes destination endpoint for routing - traffic. + :param destination: Required. Describes destination endpoint for routing traffic. :type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -22038,7 +26850,14 @@ class TcpConfig(Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__(self, *, name: str, port: int, destination, **kwargs) -> None: + def __init__( + self, + *, + name: str, + port: int, + destination: "GatewayDestination", + **kwargs + ): super(TcpConfig, self).__init__(**kwargs) self.name = name self.port = port @@ -22046,41 +26865,44 @@ def __init__(self, *, name: str, port: int, destination, **kwargs) -> None: class TestErrorChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when an unexpected event occurs - in the Chaos engine. - For example, due to the cluster snapshot being inconsistent, while faulting - an entity, Chaos found that the entity was already faulted -- which would - be an unexpected event. - - All required parameters must be populated in order to send to Azure. - - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why TestErrorChaosEvent was generated. 
For - example, Chaos tries to fault a partition but finds that the partition is - no longer fault tolerant, then a TestErrorEvent gets generated with the - reason stating that the partition is not fault tolerant. + """Describes a Chaos event that gets generated when an unexpected event occurs in the Chaos engine. +For example, due to the cluster snapshot being inconsistent, while faulting an entity, Chaos found that the entity was already faulted -- which would be an unexpected event. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why TestErrorChaosEvent was generated. For example, Chaos tries to + fault a partition but finds that the partition is no longer fault tolerant, then a + TestErrorEvent gets generated with the reason stating that the partition is not fault tolerant. 
:type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + reason: Optional[str] = None, + **kwargs + ): super(TestErrorChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.kind = 'TestError' # type: str self.reason = reason - self.kind = 'TestError' class TimeBasedBackupScheduleDescription(BackupScheduleDescription): @@ -22088,21 +26910,20 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. Constant filled by server. - :type schedule_kind: str - :param schedule_frequency_type: Required. Describes the frequency with - which to run the time based backup schedule. Possible values include: - 'Invalid', 'Daily', 'Weekly' - :type schedule_frequency_type: str or - ~azure.servicefabric.models.BackupScheduleFrequencyType - :param run_days: List of days of a week when to trigger the periodic - backup. This is valid only when the backup schedule frequency type is - weekly. + :param schedule_kind: Required. The kind of backup schedule, time based or frequency + based.Constant filled by server. Possible values include: "Invalid", "TimeBased", + "FrequencyBased". + :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind + :param schedule_frequency_type: Required. Describes the frequency with which to run the time + based backup schedule. Possible values include: "Invalid", "Daily", "Weekly". 
+ :type schedule_frequency_type: str or ~azure.servicefabric.models.BackupScheduleFrequencyType + :param run_days: List of days of a week when to trigger the periodic backup. This is valid only + when the backup schedule frequency type is weekly. :type run_days: list[str or ~azure.servicefabric.models.DayOfWeek] - :param run_times: Required. Represents the list of exact time during the - day in ISO8601 format. Like '19:00:00' will represent '7PM' during the - day. Date specified along with time will be ignored. - :type run_times: list[datetime] + :param run_times: Required. Represents the list of exact time during the day in ISO8601 format. + Like '19:00:00' will represent '7PM' during the day. Date specified along with time will be + ignored. + :type run_times: list[~datetime.datetime] """ _validation = { @@ -22118,22 +26939,27 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): 'run_times': {'key': 'RunTimes', 'type': '[iso-8601]'}, } - def __init__(self, *, schedule_frequency_type, run_times, run_days=None, **kwargs) -> None: + def __init__( + self, + *, + schedule_frequency_type: Union[str, "BackupScheduleFrequencyType"], + run_times: List[datetime.datetime], + run_days: Optional[List[Union[str, "DayOfWeek"]]] = None, + **kwargs + ): super(TimeBasedBackupScheduleDescription, self).__init__(**kwargs) + self.schedule_kind = 'TimeBased' # type: str self.schedule_frequency_type = schedule_frequency_type self.run_days = run_days self.run_times = run_times - self.schedule_kind = 'TimeBased' -class TimeOfDay(Model): +class TimeOfDay(msrest.serialization.Model): """Defines an hour and minute of the day specified in 24 hour time. - :param hour: Represents the hour of the day. Value must be between 0 and - 23 inclusive. + :param hour: Represents the hour of the day. Value must be between 0 and 23 inclusive. :type hour: int - :param minute: Represents the minute of the hour. Value must be between 0 - to 59 inclusive. 
+ :param minute: Represents the minute of the hour. Value must be between 0 to 59 inclusive. :type minute: int """ @@ -22147,20 +26973,24 @@ class TimeOfDay(Model): 'minute': {'key': 'Minute', 'type': 'int'}, } - def __init__(self, *, hour: int=None, minute: int=None, **kwargs) -> None: + def __init__( + self, + *, + hour: Optional[int] = None, + minute: Optional[int] = None, + **kwargs + ): super(TimeOfDay, self).__init__(**kwargs) self.hour = hour self.minute = minute -class TimeRange(Model): +class TimeRange(msrest.serialization.Model): """Defines a time range in a 24 hour day specified by a start and end time. - :param start_time: Defines an hour and minute of the day specified in 24 - hour time. + :param start_time: Defines an hour and minute of the day specified in 24 hour time. :type start_time: ~azure.servicefabric.models.TimeOfDay - :param end_time: Defines an hour and minute of the day specified in 24 - hour time. + :param end_time: Defines an hour and minute of the day specified in 24 hour time. :type end_time: ~azure.servicefabric.models.TimeOfDay """ @@ -22169,28 +26999,32 @@ class TimeRange(Model): 'end_time': {'key': 'EndTime', 'type': 'TimeOfDay'}, } - def __init__(self, *, start_time=None, end_time=None, **kwargs) -> None: + def __init__( + self, + *, + start_time: Optional["TimeOfDay"] = None, + end_time: Optional["TimeOfDay"] = None, + **kwargs + ): super(TimeRange, self).__init__(**kwargs) self.start_time = start_time self.end_time = end_time class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): - """Describes a partitioning scheme where an integer range is allocated evenly - across a number of partitions. + """Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Constant filled by server. - :type partition_scheme: str + :param partition_scheme: Required. 
Specifies how the service is partitioned.Constant filled by + server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". + :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme :param count: Required. The number of partitions. :type count: int - :param low_key: Required. String indicating the lower bound of the - partition key range that + :param low_key: Required. String indicating the lower bound of the partition key range that should be split between the partitions. :type low_key: str - :param high_key: Required. String indicating the upper bound of the - partition key range that + :param high_key: Required. String indicating the upper bound of the partition key range that should be split between the partitions. :type high_key: str """ @@ -22209,23 +27043,29 @@ class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__(self, *, count: int, low_key: str, high_key: str, **kwargs) -> None: + def __init__( + self, + *, + count: int, + low_key: str, + high_key: str, + **kwargs + ): super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs) + self.partition_scheme = 'UniformInt64Range' # type: str self.count = count self.low_key = low_key self.high_key = high_key - self.partition_scheme = 'UniformInt64Range' -class UnplacedReplicaInformation(Model): +class UnplacedReplicaInformation(msrest.serialization.Model): """Contains information for an unplaced replica. :param service_name: The name of the service. :type service_name: str :param partition_id: The ID of the partition. :type partition_id: str - :param unplaced_replica_details: List of reasons due to which a replica - cannot be placed. + :param unplaced_replica_details: List of reasons due to which a replica cannot be placed. 
:type unplaced_replica_details: list[str] """ @@ -22235,27 +27075,32 @@ class UnplacedReplicaInformation(Model): 'unplaced_replica_details': {'key': 'UnplacedReplicaDetails', 'type': '[str]'}, } - def __init__(self, *, service_name: str=None, partition_id: str=None, unplaced_replica_details=None, **kwargs) -> None: + def __init__( + self, + *, + service_name: Optional[str] = None, + partition_id: Optional[str] = None, + unplaced_replica_details: Optional[List[str]] = None, + **kwargs + ): super(UnplacedReplicaInformation, self).__init__(**kwargs) self.service_name = service_name self.partition_id = partition_id self.unplaced_replica_details = unplaced_replica_details -class UnprovisionApplicationTypeDescriptionInfo(Model): - """Describes the operation to unregister or unprovision an application type - and its version that was registered with the Service Fabric. +class UnprovisionApplicationTypeDescriptionInfo(msrest.serialization.Model): + """Describes the operation to unregister or unprovision an application type and its version that was registered with the Service Fabric. All required parameters must be populated in order to send to Azure. - :param application_type_version: Required. The version of the application - type as defined in the application manifest. + :param application_type_version: Required. The version of the application type as defined in + the application manifest. :type application_type_version: str - :param async_property: The flag indicating whether or not unprovision - should occur asynchronously. When set to true, the unprovision operation - returns when the request is accepted by the system, and the unprovision - operation continues without any timeout limit. The default value is false. - However, we recommend setting it to true for large application packages + :param async_property: The flag indicating whether or not unprovision should occur + asynchronously. 
When set to true, the unprovision operation returns when the request is + accepted by the system, and the unprovision operation continues without any timeout limit. The + default value is false. However, we recommend setting it to true for large application packages that were provisioned. :type async_property: bool """ @@ -22269,13 +27114,19 @@ class UnprovisionApplicationTypeDescriptionInfo(Model): 'async_property': {'key': 'Async', 'type': 'bool'}, } - def __init__(self, *, application_type_version: str, async_property: bool=None, **kwargs) -> None: + def __init__( + self, + *, + application_type_version: str, + async_property: Optional[bool] = None, + **kwargs + ): super(UnprovisionApplicationTypeDescriptionInfo, self).__init__(**kwargs) self.application_type_version = application_type_version self.async_property = async_property -class UnprovisionFabricDescription(Model): +class UnprovisionFabricDescription(msrest.serialization.Model): """Describes the parameters for unprovisioning a cluster. :param code_version: The cluster code package version. @@ -22289,40 +27140,40 @@ class UnprovisionFabricDescription(Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__(self, *, code_version: str=None, config_version: str=None, **kwargs) -> None: + def __init__( + self, + *, + code_version: Optional[str] = None, + config_version: Optional[str] = None, + **kwargs + ): super(UnprovisionFabricDescription, self).__init__(**kwargs) self.code_version = code_version self.config_version = config_version -class UpdateClusterUpgradeDescription(Model): +class UpdateClusterUpgradeDescription(msrest.serialization.Model): """Parameters for updating a cluster upgrade. - :param upgrade_kind: The type of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling', - 'Rolling_ForceRestart'. Default value: "Rolling" . + :param upgrade_kind: The type of upgrade out of the following possible values. 
Possible values + include: "Invalid", "Rolling", "Rolling_ForceRestart". Default value: "Rolling". :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeType - :param update_description: Describes the parameters for updating a rolling - upgrade of application or cluster. - :type update_description: - ~azure.servicefabric.models.RollingUpgradeUpdateDescription - :param cluster_health_policy: Defines a health policy used to evaluate the - health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health - evaluation rather than absolute health evaluation after completion of each - upgrade domain. + :param update_description: Describes the parameters for updating a rolling upgrade of + application or cluster. + :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than + absolute health evaluation after completion of each upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to - evaluate the health of the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of + the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health - policy map used to evaluate the health of an application or one of its - children entities. 
- :type application_health_policy_map: - ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policy_map: Defines the application health policy map used to + evaluate the health of an application or one of its children entities. + :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -22334,7 +27185,17 @@ class UpdateClusterUpgradeDescription(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, *, upgrade_kind="Rolling", update_description=None, cluster_health_policy=None, enable_delta_health_evaluation: bool=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None: + def __init__( + self, + *, + upgrade_kind: Optional[Union[str, "UpgradeType"]] = "Rolling", + update_description: Optional["RollingUpgradeUpdateDescription"] = None, + cluster_health_policy: Optional["ClusterHealthPolicy"] = None, + enable_delta_health_evaluation: Optional[bool] = None, + cluster_upgrade_health_policy: Optional["ClusterUpgradeHealthPolicyObject"] = None, + application_health_policy_map: Optional["ApplicationHealthPolicies"] = None, + **kwargs + ): super(UpdateClusterUpgradeDescription, self).__init__(**kwargs) self.upgrade_kind = upgrade_kind self.update_description = update_description @@ -22344,15 +27205,13 @@ def __init__(self, *, upgrade_kind="Rolling", update_description=None, cluster_h self.application_health_policy_map = application_health_policy_map -class UpdatePartitionLoadResult(Model): - """Specifies result of updating load for specified partitions. The output will - be ordered based on the partition ID. +class UpdatePartitionLoadResult(msrest.serialization.Model): + """Specifies result of updating load for specified partitions. The output will be ordered based on the partition ID. :param partition_id: Id of the partition. 
:type partition_id: str - :param partition_error_code: If OperationState is Completed - this is 0. - If OperationState is Faulted - this is an error code indicating the - reason. + :param partition_error_code: If OperationState is Completed - this is 0. If OperationState is + Faulted - this is an error code indicating the reason. :type partition_error_code: int """ @@ -22361,53 +27220,58 @@ class UpdatePartitionLoadResult(Model): 'partition_error_code': {'key': 'PartitionErrorCode', 'type': 'int'}, } - def __init__(self, *, partition_id: str=None, partition_error_code: int=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + partition_error_code: Optional[int] = None, + **kwargs + ): super(UpdatePartitionLoadResult, self).__init__(**kwargs) self.partition_id = partition_id self.partition_error_code = partition_error_code class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta unhealthy cluster nodes in an - upgrade domain, containing health evaluations for each unhealthy node that - impacted current aggregated health state. - Can be returned during cluster upgrade when cluster aggregated health state - is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. + """Represents health evaluation for delta unhealthy cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. 
+Can be returned during cluster upgrade when cluster aggregated health state is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health - is currently evaluated. + :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently + evaluated. :type upgrade_domain_name: str - :param baseline_error_count: Number of upgrade domain nodes with - aggregated heath state Error in the health store at the beginning of the - cluster upgrade. 
+ :param baseline_error_count: Number of upgrade domain nodes with aggregated heath state Error + in the health store at the beginning of the cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of upgrade domain nodes in the - health store at the beginning of the cluster upgrade. + :param baseline_total_count: Total number of upgrade domain nodes in the health store at the + beginning of the cluster upgrade. :type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of - upgrade domain delta unhealthy nodes from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of upgrade domain delta + unhealthy nodes from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int - :param total_count: Total number of upgrade domain nodes in the health - store. + :param total_count: Total number of upgrade domain nodes in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -22415,9 +27279,9 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, @@ -22426,24 +27290,36 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, upgrade_domain_name: str=None, baseline_error_count: int=None, baseline_total_count: int=None, max_percent_delta_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + upgrade_domain_name: Optional[str] = None, + baseline_error_count: Optional[int] = None, + baseline_total_count: Optional[int] = None, + max_percent_delta_unhealthy_nodes: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(UpgradeDomainDeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'UpgradeDomainDeltaNodesCheck' # type: str self.upgrade_domain_name = upgrade_domain_name self.baseline_error_count = baseline_error_count self.baseline_total_count = baseline_total_count self.max_percent_delta_unhealthy_nodes = 
max_percent_delta_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'UpgradeDomainDeltaNodesCheck' -class UpgradeDomainInfo(Model): +class UpgradeDomainInfo(msrest.serialization.Model): """Information about an upgrade domain. - :param name: The name of the upgrade domain + :param name: The name of the upgrade domain. :type name: str - :param state: The state of the upgrade domain. Possible values include: - 'Invalid', 'Pending', 'InProgress', 'Completed' + :param state: The state of the upgrade domain. Possible values include: "Invalid", "Pending", + "InProgress", "Completed". :type state: str or ~azure.servicefabric.models.UpgradeDomainState """ @@ -22452,45 +27328,51 @@ class UpgradeDomainInfo(Model): 'state': {'key': 'State', 'type': 'str'}, } - def __init__(self, *, name: str=None, state=None, **kwargs) -> None: + def __init__( + self, + *, + name: Optional[str] = None, + state: Optional[Union[str, "UpgradeDomainState"]] = None, + **kwargs + ): super(UpgradeDomainInfo, self).__init__(**kwargs) self.name = name self.state = state class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for cluster nodes in an upgrade domain, - containing health evaluations for each unhealthy node that impacted current - aggregated health state. Can be returned when evaluating cluster health - during cluster upgrade and the aggregated health state is either Error or - Warning. - - All required parameters must be populated in order to send to Azure. - - :param aggregated_health_state: The health state of a Service Fabric - entity such as Cluster, Node, Application, Service, Partition, Replica - etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', - 'Unknown' - :type aggregated_health_state: str or - ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents - a summary of the evaluation process. 
+ """Represents health evaluation for cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health during cluster upgrade and the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. The health manager in the cluster performs health evaluations in + determining the aggregated health state of an entity. This enumeration provides information on + the kind of evaluation that was performed. Following are the possible values.Constant filled by + server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", + "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", + "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", + "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", + "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", + "NodeTypeNodes". + :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, + Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", + "Warning", "Error", "Unknown". + :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents a summary of the + evaluation process. :type description: str - :param kind: Required. Constant filled by server. - :type kind: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health - is currently evaluated. + :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently + evaluated. 
:type upgrade_domain_name: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of - unhealthy nodes from the ClusterHealthPolicy. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the + ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes in the current upgrade domain. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to - the aggregated health state. Includes all the unhealthy - NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: - list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health + state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -22498,29 +27380,38 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): } _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, - 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, *, aggregated_health_state=None, description: str=None, upgrade_domain_name: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + def __init__( + self, + *, + aggregated_health_state: Optional[Union[str, "HealthState"]] = None, + description: Optional[str] = None, + upgrade_domain_name: Optional[str] = 
None, + max_percent_unhealthy_nodes: Optional[int] = None, + total_count: Optional[int] = None, + unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, + **kwargs + ): super(UpgradeDomainNodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.kind = 'UpgradeDomainNodes' # type: str self.upgrade_domain_name = upgrade_domain_name self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations - self.kind = 'UpgradeDomainNodes' -class UpgradeOrchestrationServiceState(Model): +class UpgradeOrchestrationServiceState(msrest.serialization.Model): """Service state of Service Fabric Upgrade Orchestration Service. - :param service_state: The state of Service Fabric Upgrade Orchestration - Service. + :param service_state: The state of Service Fabric Upgrade Orchestration Service. :type service_state: str """ @@ -22528,26 +27419,28 @@ class UpgradeOrchestrationServiceState(Model): 'service_state': {'key': 'ServiceState', 'type': 'str'}, } - def __init__(self, *, service_state: str=None, **kwargs) -> None: + def __init__( + self, + *, + service_state: Optional[str] = None, + **kwargs + ): super(UpgradeOrchestrationServiceState, self).__init__(**kwargs) self.service_state = service_state -class UpgradeOrchestrationServiceStateSummary(Model): +class UpgradeOrchestrationServiceStateSummary(msrest.serialization.Model): """Service state summary of Service Fabric Upgrade Orchestration Service. :param current_code_version: The current code version of the cluster. :type current_code_version: str - :param current_manifest_version: The current manifest version of the - cluster. + :param current_manifest_version: The current manifest version of the cluster. :type current_manifest_version: str :param target_code_version: The target code version of the cluster. 
:type target_code_version: str - :param target_manifest_version: The target manifest version of the - cluster. + :param target_manifest_version: The target manifest version of the cluster. :type target_manifest_version: str - :param pending_upgrade_type: The type of the pending upgrade of the - cluster. + :param pending_upgrade_type: The type of the pending upgrade of the cluster. :type pending_upgrade_type: str """ @@ -22559,7 +27452,16 @@ class UpgradeOrchestrationServiceStateSummary(Model): 'pending_upgrade_type': {'key': 'PendingUpgradeType', 'type': 'str'}, } - def __init__(self, *, current_code_version: str=None, current_manifest_version: str=None, target_code_version: str=None, target_manifest_version: str=None, pending_upgrade_type: str=None, **kwargs) -> None: + def __init__( + self, + *, + current_code_version: Optional[str] = None, + current_manifest_version: Optional[str] = None, + target_code_version: Optional[str] = None, + target_manifest_version: Optional[str] = None, + pending_upgrade_type: Optional[str] = None, + **kwargs + ): super(UpgradeOrchestrationServiceStateSummary, self).__init__(**kwargs) self.current_code_version = current_code_version self.current_manifest_version = current_manifest_version @@ -22568,14 +27470,14 @@ def __init__(self, *, current_code_version: str=None, current_manifest_version: self.pending_upgrade_type = pending_upgrade_type -class UploadChunkRange(Model): +class UploadChunkRange(msrest.serialization.Model): """Information about which portion of the file to upload. - :param start_position: The start position of the portion of the file. It's - represented by the number of bytes. + :param start_position: The start position of the portion of the file. It's represented by the + number of bytes. :type start_position: str - :param end_position: The end position of the portion of the file. It's - represented by the number of bytes. + :param end_position: The end position of the portion of the file. 
It's represented by the + number of bytes. :type end_position: str """ @@ -22584,19 +27486,24 @@ class UploadChunkRange(Model): 'end_position': {'key': 'EndPosition', 'type': 'str'}, } - def __init__(self, *, start_position: str=None, end_position: str=None, **kwargs) -> None: + def __init__( + self, + *, + start_position: Optional[str] = None, + end_position: Optional[str] = None, + **kwargs + ): super(UploadChunkRange, self).__init__(**kwargs) self.start_position = start_position self.end_position = end_position -class UploadSession(Model): +class UploadSession(msrest.serialization.Model): """Information about a image store upload session. - :param upload_sessions: When querying upload session by upload session ID, - the result contains only one upload session. When querying upload session - by image store relative path, the result might contain multiple upload - sessions. + :param upload_sessions: When querying upload session by upload session ID, the result contains + only one upload session. When querying upload session by image store relative path, the result + might contain multiple upload sessions. :type upload_sessions: list[~azure.servicefabric.models.UploadSessionInfo] """ @@ -22604,28 +27511,30 @@ class UploadSession(Model): 'upload_sessions': {'key': 'UploadSessions', 'type': '[UploadSessionInfo]'}, } - def __init__(self, *, upload_sessions=None, **kwargs) -> None: + def __init__( + self, + *, + upload_sessions: Optional[List["UploadSessionInfo"]] = None, + **kwargs + ): super(UploadSession, self).__init__(**kwargs) self.upload_sessions = upload_sessions -class UploadSessionInfo(Model): - """Information about an image store upload session. A session is associated - with a relative path in the image store. +class UploadSessionInfo(msrest.serialization.Model): + """Information about an image store upload session. A session is associated with a relative path in the image store. - :param store_relative_path: The remote location within image store. 
This - path is relative to the image store root. + :param store_relative_path: The remote location within image store. This path is relative to + the image store root. :type store_relative_path: str - :param session_id: A unique ID of the upload session. A session ID can be - reused only if the session was committed or removed. + :param session_id: A unique ID of the upload session. A session ID can be reused only if the + session was committed or removed. :type session_id: str - :param modified_date: The date and time when the upload session was last - modified. - :type modified_date: datetime + :param modified_date: The date and time when the upload session was last modified. + :type modified_date: ~datetime.datetime :param file_size: The size in bytes of the uploading file. :type file_size: str - :param expected_ranges: List of chunk ranges that image store has not - received yet. + :param expected_ranges: List of chunk ranges that image store has not received yet. :type expected_ranges: list[~azure.servicefabric.models.UploadChunkRange] """ @@ -22637,7 +27546,16 @@ class UploadSessionInfo(Model): 'expected_ranges': {'key': 'ExpectedRanges', 'type': '[UploadChunkRange]'}, } - def __init__(self, *, store_relative_path: str=None, session_id: str=None, modified_date=None, file_size: str=None, expected_ranges=None, **kwargs) -> None: + def __init__( + self, + *, + store_relative_path: Optional[str] = None, + session_id: Optional[str] = None, + modified_date: Optional[datetime.datetime] = None, + file_size: Optional[str] = None, + expected_ranges: Optional[List["UploadChunkRange"]] = None, + **kwargs + ): super(UploadSessionInfo, self).__init__(**kwargs) self.store_relative_path = store_relative_path self.session_id = session_id @@ -22646,13 +27564,12 @@ def __init__(self, *, store_relative_path: str=None, session_id: str=None, modif self.expected_ranges = expected_ranges -class UsageInfo(Model): - """Information about how much space and how many files in the file 
system the - ImageStore is using in this category. +class UsageInfo(msrest.serialization.Model): + """Information about how much space and how many files in the file system the ImageStore is using in this category. - :param used_space: the size of all files in this category + :param used_space: the size of all files in this category. :type used_space: str - :param file_count: the number of all files in this category + :param file_count: the number of all files in this category. :type file_count: str """ @@ -22661,7 +27578,13 @@ class UsageInfo(Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__(self, *, used_space: str=None, file_count: str=None, **kwargs) -> None: + def __init__( + self, + *, + used_space: Optional[str] = None, + file_count: Optional[str] = None, + **kwargs + ): super(UsageInfo, self).__init__(**kwargs) self.used_space = used_space self.file_count = file_count @@ -22672,48 +27595,53 @@ class ValidationFailedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why the ValidationFailedChaosEvent was generated. - This may happen because more than MaxPercentUnhealthyNodes are unhealthy - for more than MaxClusterStabilizationTimeout. This reason will be in the - Reason property of the ValidationFailedChaosEvent as a string. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. 
+ :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why the ValidationFailedChaosEvent was generated. This may happen + because more than MaxPercentUnhealthyNodes are unhealthy for more than + MaxClusterStabilizationTimeout. This reason will be in the Reason property of the + ValidationFailedChaosEvent as a string. :type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + reason: Optional[str] = None, + **kwargs + ): super(ValidationFailedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.kind = 'ValidationFailed' # type: str self.reason = reason - self.kind = 'ValidationFailed' -class VolumeProviderParametersAzureFile(Model): +class VolumeProviderParametersAzureFile(msrest.serialization.Model): """This type describes a volume provided by an Azure Files file share. All required parameters must be populated in order to send to Azure. - :param account_name: Required. Name of the Azure storage account for the - File Share. + :param account_name: Required. Name of the Azure storage account for the File Share. :type account_name: str - :param account_key: Access key of the Azure storage account for the File - Share. + :param account_key: Access key of the Azure storage account for the File Share. :type account_key: str - :param share_name: Required. Name of the Azure Files file share that - provides storage for the volume. + :param share_name: Required. Name of the Azure Files file share that provides storage for the + volume. 
:type share_name: str """ @@ -22728,18 +27656,24 @@ class VolumeProviderParametersAzureFile(Model): 'share_name': {'key': 'shareName', 'type': 'str'}, } - def __init__(self, *, account_name: str, share_name: str, account_key: str=None, **kwargs) -> None: + def __init__( + self, + *, + account_name: str, + share_name: str, + account_key: Optional[str] = None, + **kwargs + ): super(VolumeProviderParametersAzureFile, self).__init__(**kwargs) self.account_name = account_name self.account_key = account_key self.share_name = share_name -class VolumeResourceDescription(Model): +class VolumeResourceDescription(msrest.serialization.Model): """This type describes a volume resource. - Variables are only populated by the server, and will be ignored when - sending a request. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. @@ -22747,26 +27681,23 @@ class VolumeResourceDescription(Model): :type name: str :param description: User readable description of the volume. :type description: str - :ivar status: Status of the volume. Possible values include: 'Unknown', - 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :ivar status: Status of the volume. Possible values include: "Unknown", "Ready", "Upgrading", + "Creating", "Deleting", "Failed". :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current - status of the volume. + :ivar status_details: Gives additional information about the current status of the volume. :vartype status_details: str - :ivar provider: Required. Provider of the volume. Default value: - "SFAzureFile" . - :vartype provider: str - :param azure_file_parameters: This type describes a volume provided by an - Azure Files file share. - :type azure_file_parameters: - ~azure.servicefabric.models.VolumeProviderParametersAzureFile + :param provider: Required. 
Provider of the volume. Possible values include: "SFAzureFile". + :type provider: str or ~azure.servicefabric.models.VolumeProvider + :param azure_file_parameters: This type describes a volume provided by an Azure Files file + share. + :type azure_file_parameters: ~azure.servicefabric.models.VolumeProviderParametersAzureFile """ _validation = { 'name': {'required': True}, 'status': {'readonly': True}, 'status_details': {'readonly': True}, - 'provider': {'required': True, 'constant': True}, + 'provider': {'required': True}, } _attribute_map = { @@ -22778,29 +27709,36 @@ class VolumeResourceDescription(Model): 'azure_file_parameters': {'key': 'properties.azureFileParameters', 'type': 'VolumeProviderParametersAzureFile'}, } - provider = "SFAzureFile" - - def __init__(self, *, name: str, description: str=None, azure_file_parameters=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + provider: Union[str, "VolumeProvider"], + description: Optional[str] = None, + azure_file_parameters: Optional["VolumeProviderParametersAzureFile"] = None, + **kwargs + ): super(VolumeResourceDescription, self).__init__(**kwargs) self.name = name self.description = description self.status = None self.status_details = None + self.provider = provider self.azure_file_parameters = azure_file_parameters class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the replica build operation to finish. This - indicates that there is a replica that is going through the copy or is - providing data for building another replica. Bring the node down will abort - this copy operation which are typically expensive involving data movements. + """Safety check that waits for the replica build operation to finish. This indicates that there is a replica that is going through the copy or is providing data for building another replica. Bring the node down will abort this copy operation which are typically expensive involving data movements. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22813,21 +27751,28 @@ class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(WaitForInbuildReplicaSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForInbuildReplica' + self.kind = 'WaitForInbuildReplica' # type: str class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica that was moved out of the - node due to upgrade to be placed back again on that node. + """Safety check that waits for the primary replica that was moved out of the node due to upgrade to be placed back again on that node. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. 
The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22840,22 +27785,28 @@ class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForPrimaryPlacement' + self.kind = 'WaitForPrimaryPlacement' # type: str class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica to be moved out of the node - before starting an upgrade to ensure the availability of the primary - replica for the partition. + """Safety check that waits for the primary replica to be moved out of the node before starting an upgrade to ensure the availability of the primary replica for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. 
Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". + :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22868,21 +27819,28 @@ class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(WaitForPrimarySwapSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForPrimarySwap' + self.kind = 'WaitForPrimarySwap' # type: str class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the current reconfiguration of the partition to - be completed before starting an upgrade. + """Safety check that waits for the current reconfiguration of the partition to be completed before starting an upgrade. All required parameters must be populated in order to send to Azure. - :param kind: Required. Constant filled by server. - :type kind: str - :param partition_id: Id of the partition which is undergoing the safety - check. + :param kind: Required. The kind of safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service and the reliability of + the state. Following are the kinds of safety checks.Constant filled by server. Possible values + include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", + "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". 
+ :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param partition_id: Id of the partition which is undergoing the safety check. :type partition_id: str """ @@ -22895,40 +27853,50 @@ class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, *, partition_id: str=None, **kwargs) -> None: + def __init__( + self, + *, + partition_id: Optional[str] = None, + **kwargs + ): super(WaitForReconfigurationSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForReconfiguration' + self.kind = 'WaitForReconfiguration' # type: str class WaitingChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos is waiting for the - cluster to become ready for faulting, for example, Chaos may be waiting for - the on-going upgrade to finish. + """Describes a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. All required parameters must be populated in order to send to Azure. - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event - was generated. - :type time_stamp_utc: datetime - :param kind: Required. Constant filled by server. - :type kind: str - :param reason: Describes why the WaitingChaosEvent was generated, for - example, due to a cluster upgrade. + :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values + include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", + "Stopped". + :type kind: str or ~azure.servicefabric.models.ChaosEventKind + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. + :type time_stamp_utc: ~datetime.datetime + :param reason: Describes why the WaitingChaosEvent was generated, for example, due to a cluster + upgrade. 
:type reason: str """ _validation = { - 'time_stamp_utc': {'required': True}, 'kind': {'required': True}, + 'time_stamp_utc': {'required': True}, } _attribute_map = { - 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'kind': {'key': 'Kind', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + def __init__( + self, + *, + time_stamp_utc: datetime.datetime, + reason: Optional[str] = None, + **kwargs + ): super(WaitingChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.kind = 'Waiting' # type: str self.reason = reason - self.kind = 'Waiting' diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py deleted file mode 100644 index 8242aaf8c223..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py +++ /dev/null @@ -1,1079 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class HealthState(str, Enum): - - invalid = "Invalid" #: Indicates an invalid health state. All Service Fabric enumerations have the invalid type. The value is zero. - ok = "Ok" #: Indicates the health state is okay. The value is 1. - warning = "Warning" #: Indicates the health state is at a warning level. The value is 2. 
- error = "Error" #: Indicates the health state is at an error level. Error health state should be investigated, as they can impact the correct functionality of the cluster. The value is 3. - unknown = "Unknown" #: Indicates an unknown health status. The value is 65535. - - -class FabricErrorCodes(str, Enum): - - fabric_e_invalid_partition_key = "FABRIC_E_INVALID_PARTITION_KEY" - fabric_e_imagebuilder_validation_error = "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - fabric_e_invalid_address = "FABRIC_E_INVALID_ADDRESS" - fabric_e_application_not_upgrading = "FABRIC_E_APPLICATION_NOT_UPGRADING" - fabric_e_application_upgrade_validation_error = "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - fabric_e_fabric_not_upgrading = "FABRIC_E_FABRIC_NOT_UPGRADING" - fabric_e_fabric_upgrade_validation_error = "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - fabric_e_invalid_configuration = "FABRIC_E_INVALID_CONFIGURATION" - fabric_e_invalid_name_uri = "FABRIC_E_INVALID_NAME_URI" - fabric_e_path_too_long = "FABRIC_E_PATH_TOO_LONG" - fabric_e_key_too_large = "FABRIC_E_KEY_TOO_LARGE" - fabric_e_service_affinity_chain_not_supported = "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - fabric_e_invalid_atomic_group = "FABRIC_E_INVALID_ATOMIC_GROUP" - fabric_e_value_empty = "FABRIC_E_VALUE_EMPTY" - fabric_e_node_not_found = "FABRIC_E_NODE_NOT_FOUND" - fabric_e_application_type_not_found = "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - fabric_e_application_not_found = "FABRIC_E_APPLICATION_NOT_FOUND" - fabric_e_service_type_not_found = "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - fabric_e_service_does_not_exist = "FABRIC_E_SERVICE_DOES_NOT_EXIST" - fabric_e_service_type_template_not_found = "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - fabric_e_configuration_section_not_found = "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - fabric_e_partition_not_found = "FABRIC_E_PARTITION_NOT_FOUND" - fabric_e_replica_does_not_exist = "FABRIC_E_REPLICA_DOES_NOT_EXIST" - fabric_e_service_group_does_not_exist = 
"FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - fabric_e_configuration_parameter_not_found = "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - fabric_e_directory_not_found = "FABRIC_E_DIRECTORY_NOT_FOUND" - fabric_e_fabric_version_not_found = "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - fabric_e_file_not_found = "FABRIC_E_FILE_NOT_FOUND" - fabric_e_name_does_not_exist = "FABRIC_E_NAME_DOES_NOT_EXIST" - fabric_e_property_does_not_exist = "FABRIC_E_PROPERTY_DOES_NOT_EXIST" - fabric_e_enumeration_completed = "FABRIC_E_ENUMERATION_COMPLETED" - fabric_e_service_manifest_not_found = "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - fabric_e_key_not_found = "FABRIC_E_KEY_NOT_FOUND" - fabric_e_health_entity_not_found = "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - fabric_e_application_type_already_exists = "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - fabric_e_application_already_exists = "FABRIC_E_APPLICATION_ALREADY_EXISTS" - fabric_e_application_already_in_target_version = "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - fabric_e_application_type_provision_in_progress = "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - fabric_e_application_upgrade_in_progress = "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - fabric_e_service_already_exists = "FABRIC_E_SERVICE_ALREADY_EXISTS" - fabric_e_service_group_already_exists = "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - fabric_e_application_type_in_use = "FABRIC_E_APPLICATION_TYPE_IN_USE" - fabric_e_fabric_already_in_target_version = "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - fabric_e_fabric_version_already_exists = "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - fabric_e_fabric_version_in_use = "FABRIC_E_FABRIC_VERSION_IN_USE" - fabric_e_fabric_upgrade_in_progress = "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - fabric_e_name_already_exists = "FABRIC_E_NAME_ALREADY_EXISTS" - fabric_e_name_not_empty = "FABRIC_E_NAME_NOT_EMPTY" - fabric_e_property_check_failed = "FABRIC_E_PROPERTY_CHECK_FAILED" - fabric_e_service_metadata_mismatch = "FABRIC_E_SERVICE_METADATA_MISMATCH" - 
fabric_e_service_type_mismatch = "FABRIC_E_SERVICE_TYPE_MISMATCH" - fabric_e_health_stale_report = "FABRIC_E_HEALTH_STALE_REPORT" - fabric_e_sequence_number_check_failed = "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - fabric_e_node_has_not_stopped_yet = "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - fabric_e_instance_id_mismatch = "FABRIC_E_INSTANCE_ID_MISMATCH" - fabric_e_value_too_large = "FABRIC_E_VALUE_TOO_LARGE" - fabric_e_no_write_quorum = "FABRIC_E_NO_WRITE_QUORUM" - fabric_e_not_primary = "FABRIC_E_NOT_PRIMARY" - fabric_e_not_ready = "FABRIC_E_NOT_READY" - fabric_e_reconfiguration_pending = "FABRIC_E_RECONFIGURATION_PENDING" - fabric_e_service_offline = "FABRIC_E_SERVICE_OFFLINE" - e_abort = "E_ABORT" - fabric_e_communication_error = "FABRIC_E_COMMUNICATION_ERROR" - fabric_e_operation_not_complete = "FABRIC_E_OPERATION_NOT_COMPLETE" - fabric_e_timeout = "FABRIC_E_TIMEOUT" - fabric_e_node_is_up = "FABRIC_E_NODE_IS_UP" - e_fail = "E_FAIL" - fabric_e_backup_is_enabled = "FABRIC_E_BACKUP_IS_ENABLED" - fabric_e_restore_source_target_partition_mismatch = "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - fabric_e_invalid_for_stateless_services = "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - fabric_e_backup_not_enabled = "FABRIC_E_BACKUP_NOT_ENABLED" - fabric_e_backup_policy_not_existing = "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - fabric_e_fault_analysis_service_not_existing = "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - fabric_e_backup_in_progress = "FABRIC_E_BACKUP_IN_PROGRESS" - fabric_e_restore_in_progress = "FABRIC_E_RESTORE_IN_PROGRESS" - fabric_e_backup_policy_already_existing = "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - fabric_e_invalid_service_scaling_policy = "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - e_invalidarg = "E_INVALIDARG" - fabric_e_single_instance_application_already_exists = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - fabric_e_single_instance_application_not_found = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - 
fabric_e_volume_already_exists = "FABRIC_E_VOLUME_ALREADY_EXISTS" - fabric_e_volume_not_found = "FABRIC_E_VOLUME_NOT_FOUND" - serialization_error = "SerializationError" - fabric_e_imagebuilder_reserved_directory_error = "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - - -class ApplicationDefinitionKind(str, Enum): - - invalid = "Invalid" #: Indicates the application definition kind is invalid. All Service Fabric enumerations have the invalid type. The value is 65535. - service_fabric_application_description = "ServiceFabricApplicationDescription" #: Indicates the application is defined by a Service Fabric application description. The value is 0. - compose = "Compose" #: Indicates the application is defined by compose file(s). The value is 1. - - -class ApplicationStatus(str, Enum): - - invalid = "Invalid" #: Indicates the application status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - ready = "Ready" #: Indicates the application status is ready. The value is 1. - upgrading = "Upgrading" #: Indicates the application status is upgrading. The value is 2. - creating = "Creating" #: Indicates the application status is creating. The value is 3. - deleting = "Deleting" #: Indicates the application status is deleting. The value is 4. - failed = "Failed" #: Indicates the creation or deletion of application was terminated due to persistent failures. Another create/delete request can be accepted to resume a failed application. The value is 5. - - -class ApplicationPackageCleanupPolicy(str, Enum): - - invalid = "Invalid" #: Indicates that the application package cleanup policy is invalid. This value is default. The value is zero. - default = "Default" #: Indicates that the cleanup policy of application packages is based on the cluster setting "CleanupApplicationPackageOnProvisionSuccess." The value is 1. - automatic = "Automatic" #: Indicates that the service fabric runtime determines when to do the application package cleanup. 
By default, cleanup is done on successful provision. The value is 2. - manual = "Manual" #: Indicates that the user has to explicitly clean up the application package. The value is 3. - - -class ApplicationTypeDefinitionKind(str, Enum): - - invalid = "Invalid" #: Indicates the application type definition kind is invalid. All Service Fabric enumerations have the invalid type. The value is 0. - service_fabric_application_package = "ServiceFabricApplicationPackage" #: Indicates the application type is defined and created by a Service Fabric application package provided by the user. The value is 1. - compose = "Compose" #: Indicates the application type is defined and created implicitly as part of a compose deployment. The value is 2. - - -class ApplicationTypeStatus(str, Enum): - - invalid = "Invalid" #: Indicates the application type status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - provisioning = "Provisioning" #: Indicates that the application type is being provisioned in the cluster. The value is 1. - available = "Available" #: Indicates that the application type is fully provisioned and is available for use. An application of this type and version can be created. The value is 2. - unprovisioning = "Unprovisioning" #: Indicates that the application type is in process of being unprovisioned from the cluster. The value is 3. - failed = "Failed" #: Indicates that the application type provisioning failed and it is unavailable for use. The failure details can be obtained from the application type information query. The failed application type information remains in the cluster until it is unprovisioned or reprovisioned successfully. The value is 4. - - -class UpgradeKind(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - rolling = "Rolling" #: The upgrade progresses one upgrade domain at a time. 
The value is 1 - - -class UpgradeMode(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - unmonitored_auto = "UnmonitoredAuto" #: The upgrade will proceed automatically without performing any health monitoring. The value is 1 - unmonitored_manual = "UnmonitoredManual" #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually monitor health before proceeding. The value is 2 - monitored = "Monitored" #: The upgrade will stop after completing each upgrade domain and automatically monitor health before proceeding. The value is 3 - - -class UpgradeSortOrder(str, Enum): - - invalid = "Invalid" #: Indicates that this sort order is not valid. All Service Fabric enumerations have the invalid type. The value is 0. - default = "Default" #: Indicates that the default sort order (as specified in cluster manifest) will be used. The value is 1. - numeric = "Numeric" #: Indicates that forward numeric sort order (UD names sorted as numbers) will be used. The value is 2. - lexicographical = "Lexicographical" #: Indicates that forward lexicographical sort order (UD names sorted as strings) will be used. The value is 3. - reverse_numeric = "ReverseNumeric" #: Indicates that reverse numeric sort order (UD names sorted as numbers) will be used. The value is 4. - reverse_lexicographical = "ReverseLexicographical" #: Indicates that reverse lexicographical sort order (UD names sorted as strings) will be used. The value is 5. - - -class FailureAction(str, Enum): - - invalid = "Invalid" #: Indicates the failure action is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - rollback = "Rollback" #: The upgrade will start rolling back automatically. The value is 1 - manual = "Manual" #: The upgrade will switch to UnmonitoredManual upgrade mode. 
The value is 2 - - -class UpgradeDomainState(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade domain state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - pending = "Pending" #: The upgrade domain has not started upgrading yet. The value is 1 - in_progress = "InProgress" #: The upgrade domain is being upgraded but not complete yet. The value is 2 - completed = "Completed" #: The upgrade domain has completed upgrade. The value is 3 - - -class UpgradeState(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - rolling_back_in_progress = "RollingBackInProgress" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 1 - rolling_back_completed = "RollingBackCompleted" #: The upgrade has finished rolling back. The value is 2 - rolling_forward_pending = "RollingForwardPending" #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an explicit move next request in UnmonitoredManual mode or performing health checks in Monitored mode. The value is 3 - rolling_forward_in_progress = "RollingForwardInProgress" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 4 - rolling_forward_completed = "RollingForwardCompleted" #: The upgrade has finished rolling forward. The value is 5 - failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 6 - - -class NodeUpgradePhase(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - pre_upgrade_safety_check = "PreUpgradeSafetyCheck" #: The upgrade has not started yet due to pending safety checks. The value is 1 - upgrading = "Upgrading" #: The upgrade is in progress. 
The value is 2 - post_upgrade_safety_check = "PostUpgradeSafetyCheck" #: The upgrade has completed and post upgrade safety checks are being performed. The value is 3 - - -class FailureReason(str, Enum): - - none = "None" #: Indicates the reason is invalid or unknown. All Service Fabric enumerations have the invalid type. The value is zero. - interrupted = "Interrupted" #: There was an external request to roll back the upgrade. The value is 1 - health_check = "HealthCheck" #: The upgrade failed due to health policy violations. The value is 2 - upgrade_domain_timeout = "UpgradeDomainTimeout" #: An upgrade domain took longer than the allowed upgrade domain timeout to process. The value is 3 - overall_upgrade_timeout = "OverallUpgradeTimeout" #: The overall upgrade took longer than the allowed upgrade timeout to process. The value is 4 - - -class DeactivationIntent(str, Enum): - - pause = "Pause" #: Indicates that the node should be paused. The value is 1. - restart = "Restart" #: Indicates that the intent is for the node to be restarted after a short period of time. The value is 2. - remove_data = "RemoveData" #: Indicates the intent is for the node to remove data. The value is 3. - - -class DeployedApplicationStatus(str, Enum): - - invalid = "Invalid" #: Indicates that deployment status is not valid. All Service Fabric enumerations have the invalid type. The value is zero. - downloading = "Downloading" #: Indicates that the package is downloading from the ImageStore. The value is 1. - activating = "Activating" #: Indicates that the package is activating. The value is 2. - active = "Active" #: Indicates that the package is active. The value is 3. - upgrading = "Upgrading" #: Indicates that the package is upgrading. The value is 4. - deactivating = "Deactivating" #: Indicates that the package is deactivating. The value is 5. - - -class ReplicaStatus(str, Enum): - - invalid = "Invalid" #: Indicates the replica status is invalid. 
All Service Fabric enumerations have the invalid type. The value is zero. - in_build = "InBuild" #: The replica is being built. This means that a primary replica is seeding this replica. The value is 1. - standby = "Standby" #: The replica is in standby. The value is 2. - ready = "Ready" #: The replica is ready. The value is 3. - down = "Down" #: The replica is down. The value is 4. - dropped = "Dropped" #: Replica is dropped. This means that the replica has been removed from the replica set. If it is persisted, its state has been deleted. The value is 5. - - -class ReplicaRole(str, Enum): - - unknown = "Unknown" #: Indicates the initial role that a replica is created in. The value is zero. - none = "None" #: Specifies that the replica has no responsibility in regard to the replica set. The value is 1 - primary = "Primary" #: Refers to the replica in the set on which all read and write operations are complete in order to enforce strong consistency semantics. Read operations are handled directly by the Primary replica, while write operations must be acknowledged by a quorum of the replicas in the replica set. There can only be one Primary replica in a replica set at a time. The value is 2. - idle_secondary = "IdleSecondary" #: Refers to a replica in the set that receives a state transfer from the Primary replica to prepare for becoming an active Secondary replica. There can be multiple Idle Secondary replicas in a replica set at a time. Idle Secondary replicas do not count as a part of a write quorum. The value is 3. - active_secondary = "ActiveSecondary" #: Refers to a replica in the set that receives state updates from the Primary replica, applies them, and sends acknowledgements back. Secondary replicas must participate in the write quorum for a replica set. There can be multiple active Secondary replicas in a replica set at a time. The number of active Secondary replicas is configurable that the reliability subsystem should maintain. The value is 4. 
- - -class ReconfigurationPhase(str, Enum): - - unknown = "Unknown" #: Indicates the invalid reconfiguration phase. - none = "None" #: Specifies that there is no reconfiguration in progress. - phase0 = "Phase0" #: Refers to the phase where the reconfiguration is transferring data from the previous primary to the new primary. - phase1 = "Phase1" #: Refers to the phase where the reconfiguration is querying the replica set for the progress. - phase2 = "Phase2" #: Refers to the phase where the reconfiguration is ensuring that data from the current primary is present in a majority of the replica set. - phase3 = "Phase3" #: This phase is for internal use only. - phase4 = "Phase4" #: This phase is for internal use only. - abort_phase_zero = "AbortPhaseZero" #: This phase is for internal use only. - - -class ReconfigurationType(str, Enum): - - unknown = "Unknown" #: Indicates the invalid reconfiguration type. - swap_primary = "SwapPrimary" #: Specifies that the primary replica is being swapped with a different replica. - failover = "Failover" #: Reconfiguration triggered in response to a primary going down. This could be due to many reasons such as primary replica crashing etc. - other = "Other" #: Reconfigurations where the primary replica is not changing. - - -class EntityKind(str, Enum): - - invalid = "Invalid" #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. The value is zero. - node = "Node" #: Indicates the entity is a Service Fabric node. The value is 1. - partition = "Partition" #: Indicates the entity is a Service Fabric partition. The value is 2. - service = "Service" #: Indicates the entity is a Service Fabric service. The value is 3. - application = "Application" #: Indicates the entity is a Service Fabric application. The value is 4. - replica = "Replica" #: Indicates the entity is a Service Fabric replica. The value is 5. 
- deployed_application = "DeployedApplication" #: Indicates the entity is a Service Fabric deployed application. The value is 6. - deployed_service_package = "DeployedServicePackage" #: Indicates the entity is a Service Fabric deployed service package. The value is 7. - cluster = "Cluster" #: Indicates the entity is a Service Fabric cluster. The value is 8. - - -class FabricEventKind(str, Enum): - - cluster_event = "ClusterEvent" - container_instance_event = "ContainerInstanceEvent" - node_event = "NodeEvent" - application_event = "ApplicationEvent" - service_event = "ServiceEvent" - partition_event = "PartitionEvent" - replica_event = "ReplicaEvent" - partition_analysis_event = "PartitionAnalysisEvent" - application_created = "ApplicationCreated" - application_deleted = "ApplicationDeleted" - application_new_health_report = "ApplicationNewHealthReport" - application_health_report_expired = "ApplicationHealthReportExpired" - application_upgrade_completed = "ApplicationUpgradeCompleted" - application_upgrade_domain_completed = "ApplicationUpgradeDomainCompleted" - application_upgrade_rollback_completed = "ApplicationUpgradeRollbackCompleted" - application_upgrade_rollback_started = "ApplicationUpgradeRollbackStarted" - application_upgrade_started = "ApplicationUpgradeStarted" - deployed_application_new_health_report = "DeployedApplicationNewHealthReport" - deployed_application_health_report_expired = "DeployedApplicationHealthReportExpired" - application_process_exited = "ApplicationProcessExited" - application_container_instance_exited = "ApplicationContainerInstanceExited" - node_aborted = "NodeAborted" - node_added_to_cluster = "NodeAddedToCluster" - node_closed = "NodeClosed" - node_deactivate_completed = "NodeDeactivateCompleted" - node_deactivate_started = "NodeDeactivateStarted" - node_down = "NodeDown" - node_new_health_report = "NodeNewHealthReport" - node_health_report_expired = "NodeHealthReportExpired" - node_open_succeeded = "NodeOpenSucceeded" - 
node_open_failed = "NodeOpenFailed" - node_removed_from_cluster = "NodeRemovedFromCluster" - node_up = "NodeUp" - partition_new_health_report = "PartitionNewHealthReport" - partition_health_report_expired = "PartitionHealthReportExpired" - partition_reconfigured = "PartitionReconfigured" - partition_primary_move_analysis = "PartitionPrimaryMoveAnalysis" - service_created = "ServiceCreated" - service_deleted = "ServiceDeleted" - service_new_health_report = "ServiceNewHealthReport" - service_health_report_expired = "ServiceHealthReportExpired" - deployed_service_package_new_health_report = "DeployedServicePackageNewHealthReport" - deployed_service_package_health_report_expired = "DeployedServicePackageHealthReportExpired" - stateful_replica_new_health_report = "StatefulReplicaNewHealthReport" - stateful_replica_health_report_expired = "StatefulReplicaHealthReportExpired" - stateless_replica_new_health_report = "StatelessReplicaNewHealthReport" - stateless_replica_health_report_expired = "StatelessReplicaHealthReportExpired" - cluster_new_health_report = "ClusterNewHealthReport" - cluster_health_report_expired = "ClusterHealthReportExpired" - cluster_upgrade_completed = "ClusterUpgradeCompleted" - cluster_upgrade_domain_completed = "ClusterUpgradeDomainCompleted" - cluster_upgrade_rollback_completed = "ClusterUpgradeRollbackCompleted" - cluster_upgrade_rollback_started = "ClusterUpgradeRollbackStarted" - cluster_upgrade_started = "ClusterUpgradeStarted" - chaos_stopped = "ChaosStopped" - chaos_started = "ChaosStarted" - chaos_code_package_restart_scheduled = "ChaosCodePackageRestartScheduled" - chaos_replica_removal_scheduled = "ChaosReplicaRemovalScheduled" - chaos_partition_secondary_move_scheduled = "ChaosPartitionSecondaryMoveScheduled" - chaos_partition_primary_move_scheduled = "ChaosPartitionPrimaryMoveScheduled" - chaos_replica_restart_scheduled = "ChaosReplicaRestartScheduled" - chaos_node_restart_scheduled = "ChaosNodeRestartScheduled" - - -class 
HealthEvaluationKind(str, Enum): - - invalid = "Invalid" #: Indicates that the health evaluation is invalid. The value is zero. - event = "Event" #: Indicates that the health evaluation is for a health event. The value is 1. - replicas = "Replicas" #: Indicates that the health evaluation is for the replicas of a partition. The value is 2. - partitions = "Partitions" #: Indicates that the health evaluation is for the partitions of a service. The value is 3. - deployed_service_packages = "DeployedServicePackages" #: Indicates that the health evaluation is for the deployed service packages of a deployed application. The value is 4. - deployed_applications = "DeployedApplications" #: Indicates that the health evaluation is for the deployed applications of an application. The value is 5. - services = "Services" #: Indicates that the health evaluation is for services of an application. The value is 6. - nodes = "Nodes" #: Indicates that the health evaluation is for the cluster nodes. The value is 7. - applications = "Applications" #: Indicates that the health evaluation is for the cluster applications. The value is 8. - system_application = "SystemApplication" #: Indicates that the health evaluation is for the system application. The value is 9. - upgrade_domain_deployed_applications = "UpgradeDomainDeployedApplications" #: Indicates that the health evaluation is for the deployed applications of an application in an upgrade domain. The value is 10. - upgrade_domain_nodes = "UpgradeDomainNodes" #: Indicates that the health evaluation is for the cluster nodes in an upgrade domain. The value is 11. - replica = "Replica" #: Indicates that the health evaluation is for a replica. The value is 13. - partition = "Partition" #: Indicates that the health evaluation is for a partition. The value is 14. - deployed_service_package = "DeployedServicePackage" #: Indicates that the health evaluation is for a deployed service package. The value is 16. 
- deployed_application = "DeployedApplication" #: Indicates that the health evaluation is for a deployed application. The value is 17. - service = "Service" #: Indicates that the health evaluation is for a service. The value is 15. - node = "Node" #: Indicates that the health evaluation is for a node. The value is 12. - application = "Application" #: Indicates that the health evaluation is for an application. The value is 18. - delta_nodes_check = "DeltaNodesCheck" #: Indicates that the health evaluation is for the delta of unhealthy cluster nodes. The value is 19. - upgrade_domain_delta_nodes_check = "UpgradeDomainDeltaNodesCheck" #: Indicates that the health evaluation is for the delta of unhealthy upgrade domain cluster nodes. The value is 20. - application_type_applications = "ApplicationTypeApplications" #: – Indicates that the health evaluation is for applications of an application type. The value is 21. - - -class NodeDeactivationIntent(str, Enum): - - invalid = "Invalid" #: Indicates the node deactivation intent is invalid. All Service Fabric enumerations have the invalid type. The value is zero. This value is not used. - pause = "Pause" #: Indicates that the node should be paused. The value is 1. - restart = "Restart" #: Indicates that the intent is for the node to be restarted after a short period of time. Service Fabric does not restart the node, this action is done outside of Service Fabric. The value is 2. - remove_data = "RemoveData" #: Indicates that the intent is to reimage the node. Service Fabric does not reimage the node, this action is done outside of Service Fabric. The value is 3. - remove_node = "RemoveNode" #: Indicates that the node is being decommissioned and is not expected to return. Service Fabric does not decommission the node, this action is done outside of Service Fabric. The value is 4. - - -class NodeDeactivationStatus(str, Enum): - - none = "None" #: No status is associated with the task. The value is zero. 
- safety_check_in_progress = "SafetyCheckInProgress" #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe to proceed to ensure availability of the service and reliability of the state. This value indicates that one or more safety checks are in progress. The value is 1. - safety_check_complete = "SafetyCheckComplete" #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe to proceed to ensure availability of the service and reliability of the state. This value indicates that all safety checks have been completed. The value is 2. - completed = "Completed" #: The task is completed. The value is 3. - - -class NodeDeactivationTaskType(str, Enum): - - invalid = "Invalid" #: Indicates the node deactivation task type is invalid. All Service Fabric enumerations have the invalid type. The value is zero. This value is not used. - infrastructure = "Infrastructure" #: Specifies the task created by Infrastructure hosting the nodes. The value is 1. - repair = "Repair" #: Specifies the task that was created by the Repair Manager service. The value is 2. - client = "Client" #: Specifies that the task was created by using the public API. The value is 3. - - -class NodeStatus(str, Enum): - - invalid = "Invalid" #: Indicates the node status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - up = "Up" #: Indicates the node is up. The value is 1. - down = "Down" #: Indicates the node is down. The value is 2. - enabling = "Enabling" #: Indicates the node is in process of being enabled. The value is 3. - disabling = "Disabling" #: Indicates the node is in the process of being disabled. The value is 4. - disabled = "Disabled" #: Indicates the node is disabled. The value is 5. - unknown = "Unknown" #: Indicates the node is unknown. A node would be in Unknown state if Service Fabric does not have authoritative information about that node. 
This can happen if the system learns about a node at runtime.The value is 6. - removed = "Removed" #: Indicates the node is removed. A node would be in Removed state if NodeStateRemoved API has been called for this node. In other words, Service Fabric has been informed that the persisted state on the node has been permanently lost. The value is 7. - - -class ServicePartitionStatus(str, Enum): - - invalid = "Invalid" #: Indicates the partition status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - ready = "Ready" #: Indicates that the partition is ready. This means that for a stateless service partition there is at least one instance that is up and for a stateful service partition the number of ready replicas is greater than or equal to the MinReplicaSetSize. The value is 1. - not_ready = "NotReady" #: Indicates that the partition is not ready. This status is returned when none of the other states apply. The value is 2. - in_quorum_loss = "InQuorumLoss" #: Indicates that the partition is in quorum loss. This means that number of replicas that are up and participating in a replica set is less than MinReplicaSetSize for this partition. The value is 3. - reconfiguring = "Reconfiguring" #: Indicates that the partition is undergoing reconfiguration of its replica sets. This can happen due to failover, upgrade, load balancing or addition or removal of replicas from the replica set. The value is 4. - deleting = "Deleting" #: Indicates that the partition is being deleted. The value is 5. - - -class ServiceStatus(str, Enum): - - unknown = "Unknown" #: Indicates the service status is unknown. The value is zero. - active = "Active" #: Indicates the service status is active. The value is 1. - upgrading = "Upgrading" #: Indicates the service is upgrading. The value is 2. - deleting = "Deleting" #: Indicates the service is being deleted. The value is 3. - creating = "Creating" #: Indicates the service is being created. The value is 4. 
- failed = "Failed" #: Indicates creation or deletion was terminated due to persistent failures. Another create/delete request can be accepted. The value is 5. - - -class ProvisionApplicationTypeKind(str, Enum): - - invalid = "Invalid" #: Indicates that the provision kind is invalid. This value is default and should not be used. The value is zero. - image_store_path = "ImageStorePath" #: Indicates that the provision is for a package that was previously uploaded to the image store. The value is 1. - external_store = "ExternalStore" #: Indicates that the provision is for an application package that was previously uploaded to an external store. The application package ends with the extension *.sfpkg. The value is 2. - - -class UpgradeType(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - rolling = "Rolling" #: The upgrade progresses one upgrade domain at a time. The value is 1. - rolling_force_restart = "Rolling_ForceRestart" #: The upgrade gets restarted by force. The value is 2. - - -class SafetyCheckKind(str, Enum): - - invalid = "Invalid" #: Indicates that the upgrade safety check kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - ensure_seed_node_quorum = "EnsureSeedNodeQuorum" #: Indicates that if we bring down the node then this will result in global seed node quorum loss. The value is 1. - ensure_partition_quorum = "EnsurePartitionQuorum" #: Indicates that there is some partition for which if we bring down the replica on the node, it will result in quorum loss for that partition. The value is 2. - wait_for_primary_placement = "WaitForPrimaryPlacement" #: Indicates that there is some replica on the node that was moved out of this node due to upgrade. Service Fabric is now waiting for the primary to be moved back to this node. The value is 3. 
- wait_for_primary_swap = "WaitForPrimarySwap" #: Indicates that Service Fabric is waiting for a primary replica to be moved out of the node before starting upgrade on that node. The value is 4. - wait_for_reconfiguration = "WaitForReconfiguration" #: Indicates that there is some replica on the node that is involved in a reconfiguration. Service Fabric is waiting for the reconfiguration to be complete before staring upgrade on that node. The value is 5. - wait_for_inbuild_replica = "WaitForInbuildReplica" #: Indicates that there is either a replica on the node that is going through copy, or there is a primary replica on the node that is copying data to some other replica. In both cases, bringing down the replica on the node due to upgrade will abort the copy. The value is 6. - ensure_availability = "EnsureAvailability" #: Indicates that there is either a stateless service partition on the node having exactly one instance, or there is a primary replica on the node for which the partition is quorum loss. In both cases, bringing down the replicas due to upgrade will result in loss of availability. The value is 7. - - -class CreateFabricDump(str, Enum): - - false = "False" - true = "True" - - -class ServicePackageActivationMode(str, Enum): - - shared_process = "SharedProcess" #: This is the default activation mode. With this activation mode, replicas or instances from different partition(s) of service, on a given node, will share same activation of service package on a node. The value is zero. - exclusive_process = "ExclusiveProcess" #: With this activation mode, each replica or instance of service, on a given node, will have its own dedicated activation of service package on a node. The value is 1. - - -class ServiceKind(str, Enum): - - invalid = "Invalid" #: Indicates the service kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. 
- stateless = "Stateless" #: Does not use Service Fabric to make its state highly available or reliable. The value is 1. - stateful = "Stateful" #: Uses Service Fabric to make its state or part of its state highly available and reliable. The value is 2. - - -class ServicePartitionKind(str, Enum): - - invalid = "Invalid" #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - singleton = "Singleton" #: Indicates that there is only one partition, and SingletonPartitionSchemeDescription was specified while creating the service. The value is 1. - int64_range = "Int64Range" #: Indicates that the partition is based on Int64 key ranges, and UniformInt64RangePartitionSchemeDescription was specified while creating the service. The value is 2. - named = "Named" #: Indicates that the partition is based on string names, and NamedPartitionInformation was specified while creating the service. The value is 3. - - -class ServicePlacementPolicyType(str, Enum): - - invalid = "Invalid" #: Indicates the type of the placement policy is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - invalid_domain = "InvalidDomain" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or upgrade domain cannot be used for placement of this service. The value is 1. - require_domain = "RequireDomain" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the service must be placed in a specific domain. The value is 2. 
- prefer_primary_domain = "PreferPrimaryDomain" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the Primary replica for the partitions of the service should be located in a particular domain as an optimization. The value is 3. - require_domain_distribution = "RequireDomainDistribution" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will disallow placement of any two replicas from the same partition in the same domain at any time. The value is 4. - non_partially_place_service = "NonPartiallyPlaceService" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all replicas of a particular partition of the service should be placed atomically. The value is 5. - allow_multiple_stateless_instances_on_node = "AllowMultipleStatelessInstancesOnNode" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, which indicates that multiple stateless instances of a particular partition of the service can be placed on a node. The value is 6. - - -class ServiceLoadMetricWeight(str, Enum): - - zero = "Zero" #: Disables resource balancing for this metric. This value is zero. - low = "Low" #: Specifies the metric weight of the service load as Low. The value is 1. - medium = "Medium" #: Specifies the metric weight of the service load as Medium. The value is 2. - high = "High" #: Specifies the metric weight of the service load as High. The value is 3. - - -class HostType(str, Enum): - - invalid = "Invalid" #: Indicates the type of host is not known or invalid. The value is 0. - exe_host = "ExeHost" #: Indicates the host is an executable. The value is 1. 
- container_host = "ContainerHost" #: Indicates the host is a container. The value is 2. - - -class HostIsolationMode(str, Enum): - - none = "None" #: Indicates the isolation mode is not applicable for given HostType. The value is 0. - process = "Process" #: This is the default isolation mode for a ContainerHost. The value is 1. - hyper_v = "HyperV" #: Indicates the ContainerHost is a Hyper-V container. This applies to only Windows containers. The value is 2. - - -class DeploymentStatus(str, Enum): - - invalid = "Invalid" #: Indicates status of the application or service package is not known or invalid. The value is 0. - downloading = "Downloading" #: Indicates the application or service package is being downloaded to the node from the ImageStore. The value is 1. - activating = "Activating" #: Indicates the application or service package is being activated. The value is 2. - active = "Active" #: Indicates the application or service package is active the node. The value is 3. - upgrading = "Upgrading" #: Indicates the application or service package is being upgraded. The value is 4. - deactivating = "Deactivating" #: Indicates the application or service package is being deactivated. The value is 5. - ran_to_completion = "RanToCompletion" #: Indicates the application or service package has ran to completion successfully. The value is 6. - failed = "Failed" #: Indicates the application or service package has failed to run to completion. The value is 7. - - -class EntryPointStatus(str, Enum): - - invalid = "Invalid" #: Indicates status of entry point is not known or invalid. The value is 0. - pending = "Pending" #: Indicates the entry point is scheduled to be started. The value is 1. - starting = "Starting" #: Indicates the entry point is being started. The value is 2. - started = "Started" #: Indicates the entry point was started successfully and is running. The value is 3. - stopping = "Stopping" #: Indicates the entry point is being stopped. The value is 4. 
- stopped = "Stopped" #: Indicates the entry point is not running. The value is 5. - - -class ChaosStatus(str, Enum): - - invalid = "Invalid" #: Indicates an invalid Chaos status. All Service Fabric enumerations have the invalid type. The value is zero. - running = "Running" #: Indicates that Chaos is not stopped. The value is one. - stopped = "Stopped" #: Indicates that Chaos is not scheduling further faults. The value is two. - - -class ChaosScheduleStatus(str, Enum): - - invalid = "Invalid" #: Indicates an invalid Chaos Schedule status. All Service Fabric enumerations have the invalid type. The value is zero. - stopped = "Stopped" #: Indicates that the schedule is stopped and not being used to schedule runs of chaos. The value is one. - active = "Active" #: Indicates that the schedule is active and is being used to schedule runs of Chaos. The value is two. - expired = "Expired" #: Indicates that the schedule is expired and will no longer be used to schedule runs of Chaos. The value is three. - pending = "Pending" #: Indicates that the schedule is pending and is not yet being used to schedule runs of Chaos but will be used when the start time is passed. The value is four. - - -class ChaosEventKind(str, Enum): - - invalid = "Invalid" #: Indicates an invalid Chaos event kind. All Service Fabric enumerations have the invalid type. - started = "Started" #: Indicates a Chaos event that gets generated when Chaos is started. - executing_faults = "ExecutingFaults" #: Indicates a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings. - waiting = "Waiting" #: Indicates a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. 
- validation_failed = "ValidationFailed" #: Indicates a Chaos event that gets generated when the cluster entities do not become stable and healthy within ChaosParameters.MaxClusterStabilizationTimeoutInSeconds. - test_error = "TestError" #: Indicates a Chaos event that gets generated when an unexpected event has occurred in the Chaos engine, for example, due to the cluster snapshot being inconsistent, while faulting a faultable entity Chaos found that the entity was already faulted. - stopped = "Stopped" #: Indicates a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up. - - -class ComposeDeploymentStatus(str, Enum): - - invalid = "Invalid" #: Indicates that the compose deployment status is invalid. The value is zero. - provisioning = "Provisioning" #: Indicates that the compose deployment is being provisioned in background. The value is 1. - creating = "Creating" #: Indicates that the compose deployment is being created in background. The value is 2. - ready = "Ready" #: Indicates that the compose deployment has been successfully created or upgraded. The value is 3. - unprovisioning = "Unprovisioning" #: Indicates that the compose deployment is being unprovisioned in background. The value is 4. - deleting = "Deleting" #: Indicates that the compose deployment is being deleted in background. The value is 5. - failed = "Failed" #: Indicates that the compose deployment was terminated due to persistent failures. The value is 6. - upgrading = "Upgrading" #: Indicates that the compose deployment is being upgraded in the background. The value is 7. - - -class ComposeDeploymentUpgradeState(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - provisioning_target = "ProvisioningTarget" #: The upgrade is in the progress of provisioning target application type version. The value is 1. 
- rolling_forward_in_progress = "RollingForwardInProgress" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. - rolling_forward_pending = "RollingForwardPending" #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an explicit move next request in UnmonitoredManual mode or performing health checks in Monitored mode. The value is 3 - unprovisioning_current = "UnprovisioningCurrent" #: The upgrade is in the progress of unprovisioning current application type version and rolling forward to the target version is completed. The value is 4. - rolling_forward_completed = "RollingForwardCompleted" #: The upgrade has finished rolling forward. The value is 5. - rolling_back_in_progress = "RollingBackInProgress" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 6. - unprovisioning_target = "UnprovisioningTarget" #: The upgrade is in the progress of unprovisioning target application type version and rolling back to the current version is completed. The value is 7. - rolling_back_completed = "RollingBackCompleted" #: The upgrade has finished rolling back. The value is 8. - failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 9. - - -class ServiceCorrelationScheme(str, Enum): - - invalid = "Invalid" #: An invalid correlation scheme. Cannot be used. The value is zero. - affinity = "Affinity" #: Indicates that this service has an affinity relationship with another service. Provided for backwards compatibility, consider preferring the Aligned or NonAlignedAffinity options. The value is 1. - aligned_affinity = "AlignedAffinity" #: Aligned affinity ensures that the primaries of the partitions of the affinitized services are collocated on the same nodes. This is the default and is the same as selecting the Affinity scheme. The value is 2. 
- non_aligned_affinity = "NonAlignedAffinity" #: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will be collocated. The value is 3. - - -class MoveCost(str, Enum): - - zero = "Zero" #: Zero move cost. This value is zero. - low = "Low" #: Specifies the move cost of the service as Low. The value is 1. - medium = "Medium" #: Specifies the move cost of the service as Medium. The value is 2. - high = "High" #: Specifies the move cost of the service as High. The value is 3. - very_high = "VeryHigh" #: Specifies the move cost of the service as VeryHigh. The value is 4. - - -class PartitionScheme(str, Enum): - - invalid = "Invalid" #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - singleton = "Singleton" #: Indicates that the partition is based on string names, and is a SingletonPartitionSchemeDescription object, The value is 1. - uniform_int64_range = "UniformInt64Range" #: Indicates that the partition is based on Int64 key ranges, and is a UniformInt64RangePartitionSchemeDescription object. The value is 2. - named = "Named" #: Indicates that the partition is based on string names, and is a NamedPartitionSchemeDescription object. The value is 3 - - -class ServiceOperationName(str, Enum): - - unknown = "Unknown" #: Reserved for future use. - none = "None" #: The service replica or instance is not going through any life-cycle changes. - open = "Open" #: The service replica or instance is being opened. - change_role = "ChangeRole" #: The service replica is changing roles. - close = "Close" #: The service replica or instance is being closed. - abort = "Abort" #: The service replica or instance is being aborted. - - -class ReplicatorOperationName(str, Enum): - - invalid = "Invalid" #: Default value if the replicator is not yet ready. 
- none = "None" #: Replicator is not running any operation from Service Fabric perspective. - open = "Open" #: Replicator is opening. - change_role = "ChangeRole" #: Replicator is in the process of changing its role. - update_epoch = "UpdateEpoch" #: Due to a change in the replica set, replicator is being updated with its Epoch. - close = "Close" #: Replicator is closing. - abort = "Abort" #: Replicator is being aborted. - on_data_loss = "OnDataLoss" #: Replicator is handling the data loss condition, where the user service may potentially be recovering state from an external source. - wait_for_catchup = "WaitForCatchup" #: Replicator is waiting for a quorum of replicas to be caught up to the latest state. - build = "Build" #: Replicator is in the process of building one or more replicas. - - -class PartitionAccessStatus(str, Enum): - - invalid = "Invalid" #: Indicates that the read or write operation access status is not valid. This value is not returned to the caller. - granted = "Granted" #: Indicates that the read or write operation access is granted and the operation is allowed. - reconfiguration_pending = "ReconfigurationPending" #: Indicates that the client should try again later, because a reconfiguration is in progress. - not_primary = "NotPrimary" #: Indicates that this client request was received by a replica that is not a Primary replica. - no_write_quorum = "NoWriteQuorum" #: Indicates that no write quorum is available and, therefore, no write operation can be accepted. - - -class FabricReplicaStatus(str, Enum): - - invalid = "Invalid" #: Indicates that the read or write operation access status is not valid. This value is not returned to the caller. - down = "Down" #: Indicates that the replica is down. - up = "Up" #: Indicates that the replica is up. - - -class ReplicaKind(str, Enum): - - invalid = "Invalid" #: Represents an invalid replica kind. The value is zero. - key_value_store = "KeyValueStore" #: Represents a key value store replica. 
The value is 1 - - -class ServiceTypeRegistrationStatus(str, Enum): - - invalid = "Invalid" #: Indicates the registration status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - disabled = "Disabled" #: Indicates that the service type is disabled on this node. A type gets disabled when there are too many failures of the code package hosting the service type. If the service type is disabled, new replicas of that service type will not be placed on the node until it is enabled again. The service type is enabled again after the process hosting it comes up and re-registers the type or a preconfigured time interval has passed. The value is 1. - enabled = "Enabled" #: Indicates that the service type is enabled on this node. Replicas of this service type can be placed on this node when the code package registers the service type. The value is 2. - registered = "Registered" #: Indicates that the service type is enabled and registered on the node by a code package. Replicas of this service type can now be placed on this node. The value is 3. - - -class ServiceEndpointRole(str, Enum): - - invalid = "Invalid" #: Indicates the service endpoint role is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - stateless = "Stateless" #: Indicates that the service endpoint is of a stateless service. The value is 1. - stateful_primary = "StatefulPrimary" #: Indicates that the service endpoint is of a primary replica of a stateful service. The value is 2. - stateful_secondary = "StatefulSecondary" #: Indicates that the service endpoint is of a secondary replica of a stateful service. The value is 3. - - -class OperationState(str, Enum): - - invalid = "Invalid" #: The operation state is invalid. - running = "Running" #: The operation is in progress. - rolling_back = "RollingBack" #: The operation is rolling back internal system state because it encountered a fatal error or was cancelled by the user. 
"RollingBack" does not refer to user state. For example, if CancelOperation is called on a command of type PartitionDataLoss, state of "RollingBack" does not mean service data is being restored (assuming the command has progressed far enough to cause data loss). It means the system is rolling back/cleaning up internal system state associated with the command. - completed = "Completed" #: The operation has completed successfully and is no longer running. - faulted = "Faulted" #: The operation has failed and is no longer running. - cancelled = "Cancelled" #: The operation was cancelled by the user using CancelOperation, and is no longer running. - force_cancelled = "ForceCancelled" #: The operation was cancelled by the user using CancelOperation, with the force parameter set to true. It is no longer running. Refer to CancelOperation for more details. - - -class OperationType(str, Enum): - - invalid = "Invalid" #: The operation state is invalid. - partition_data_loss = "PartitionDataLoss" #: An operation started using the StartDataLoss API. - partition_quorum_loss = "PartitionQuorumLoss" #: An operation started using the StartQuorumLoss API. - partition_restart = "PartitionRestart" #: An operation started using the StartPartitionRestart API. - node_transition = "NodeTransition" #: An operation started using the StartNodeTransition API. - - -class PackageSharingPolicyScope(str, Enum): - - none = "None" #: No package sharing policy scope. The value is 0. - all = "All" #: Share all code, config and data packages from corresponding service manifest. The value is 1. - code = "Code" #: Share all code packages from corresponding service manifest. The value is 2. - config = "Config" #: Share all config packages from corresponding service manifest. The value is 3. - data = "Data" #: Share all data packages from corresponding service manifest. The value is 4. - - -class PropertyValueKind(str, Enum): - - invalid = "Invalid" #: Indicates the property is invalid. 
All Service Fabric enumerations have the invalid type. The value is zero. - binary = "Binary" #: The data inside the property is a binary blob. The value is 1. - int64 = "Int64" #: The data inside the property is an int64. The value is 2. - double = "Double" #: The data inside the property is a double. The value is 3. - string = "String" #: The data inside the property is a string. The value is 4. - guid = "Guid" #: The data inside the property is a guid. The value is 5. - - -class PropertyBatchOperationKind(str, Enum): - - invalid = "Invalid" #: Indicates the property operation is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - put = "Put" #: The operation will create or edit a property. The value is 1. - get = "Get" #: The operation will get a property. The value is 2. - check_exists = "CheckExists" #: The operation will check that a property exists or doesn't exists, depending on the provided value. The value is 3. - check_sequence = "CheckSequence" #: The operation will ensure that the sequence number is equal to the provided value. The value is 4. - delete = "Delete" #: The operation will delete a property. The value is 5. - check_value = "CheckValue" #: The operation will ensure that the value of a property is equal to the provided value. The value is 7. - - -class PropertyBatchInfoKind(str, Enum): - - invalid = "Invalid" #: Indicates the property batch info is invalid. All Service Fabric enumerations have the invalid type. - successful = "Successful" #: The property batch succeeded. - failed = "Failed" #: The property batch failed. - - -class RetentionPolicyType(str, Enum): - - basic = "Basic" #: Indicates a basic retention policy type. - invalid = "Invalid" #: Indicates an invalid retention policy type. - - -class BackupStorageKind(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup storage kind. All Service Fabric enumerations have the invalid type. 
- file_share = "FileShare" #: Indicates file/ SMB share to be used as backup storage. - azure_blob_store = "AzureBlobStore" #: Indicates Azure blob store to be used as backup storage. - dsms_azure_blob_store = "DsmsAzureBlobStore" #: Indicates Dsms Azure blob store to be used as backup storage. - - -class BackupScheduleKind(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup schedule kind. All Service Fabric enumerations have the invalid type. - time_based = "TimeBased" #: Indicates a time-based backup schedule. - frequency_based = "FrequencyBased" #: Indicates a frequency-based backup schedule. - - -class BackupPolicyScope(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup policy scope type. All Service Fabric enumerations have the invalid type. - partition = "Partition" #: Indicates the backup policy is applied at partition level. Hence overriding any policy which may have applied at partition's service or application level. - service = "Service" #: Indicates the backup policy is applied at service level. All partitions of the service inherit this policy unless explicitly overridden at partition level. - application = "Application" #: Indicates the backup policy is applied at application level. All services and partitions of the application inherit this policy unless explicitly overridden at service or partition level. - - -class BackupSuspensionScope(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup suspension scope type also indicating entity is not suspended. All Service Fabric enumerations have the invalid type. - partition = "Partition" #: Indicates the backup suspension is applied at partition level. - service = "Service" #: Indicates the backup suspension is applied at service level. All partitions of the service are hence suspended for backup. - application = "Application" #: Indicates the backup suspension is applied at application level. 
All services and partitions of the application are hence suspended for backup. - - -class RestoreState(str, Enum): - - invalid = "Invalid" #: Indicates an invalid restore state. All Service Fabric enumerations have the invalid type. - accepted = "Accepted" #: Operation has been validated and accepted. Restore is yet to be triggered. - restore_in_progress = "RestoreInProgress" #: Restore operation has been triggered and is under process. - success = "Success" #: Operation completed with success. - failure = "Failure" #: Operation completed with failure. - timeout = "Timeout" #: Operation timed out. - - -class BackupType(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup type. All Service Fabric enumerations have the invalid type. - full = "Full" #: Indicates a full backup. - incremental = "Incremental" #: Indicates an incremental backup. A backup chain is comprised of a full backup followed by 0 or more incremental backups. - - -class BackupScheduleFrequencyType(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup schedule frequency type. All Service Fabric enumerations have the invalid type. - daily = "Daily" #: Indicates that the time based backup schedule is repeated at a daily frequency. - weekly = "Weekly" #: Indicates that the time based backup schedule is repeated at a weekly frequency. - - -class DayOfWeek(str, Enum): - - sunday = "Sunday" #: Indicates the Day referred is Sunday. - monday = "Monday" #: Indicates the Day referred is Monday. - tuesday = "Tuesday" #: Indicates the Day referred is Tuesday. - wednesday = "Wednesday" #: Indicates the Day referred is Wednesday. - thursday = "Thursday" #: Indicates the Day referred is Thursday. - friday = "Friday" #: Indicates the Day referred is Friday. - saturday = "Saturday" #: Indicates the Day referred is Saturday. - - -class BackupState(str, Enum): - - invalid = "Invalid" #: Indicates an invalid backup state. All Service Fabric enumerations have the invalid type. 
- accepted = "Accepted" #: Operation has been validated and accepted. Backup is yet to be triggered. - backup_in_progress = "BackupInProgress" #: Backup operation has been triggered and is under process. - success = "Success" #: Operation completed with success. - failure = "Failure" #: Operation completed with failure. - timeout = "Timeout" #: Operation timed out. - - -class BackupEntityKind(str, Enum): - - invalid = "Invalid" #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. - partition = "Partition" #: Indicates the entity is a Service Fabric partition. - service = "Service" #: Indicates the entity is a Service Fabric service. - application = "Application" #: Indicates the entity is a Service Fabric application. - - -class ImpactLevel(str, Enum): - - invalid = "Invalid" - none = "None" - restart = "Restart" - remove_data = "RemoveData" - remove_node = "RemoveNode" - - -class RepairImpactKind(str, Enum): - - invalid = "Invalid" #: The repair impact is not valid or is of an unknown type. - node = "Node" #: The repair impact affects a set of Service Fabric nodes. - - -class RepairTargetKind(str, Enum): - - invalid = "Invalid" #: The repair target is not valid or is of an unknown type. - node = "Node" #: The repair target is a set of Service Fabric nodes. - - -class State(str, Enum): - - invalid = "Invalid" #: Indicates that the repair task state is invalid. All Service Fabric enumerations have the invalid value. - created = "Created" #: Indicates that the repair task has been created. - claimed = "Claimed" #: Indicates that the repair task has been claimed by a repair executor. - preparing = "Preparing" #: Indicates that the Repair Manager is preparing the system to handle the impact of the repair task, usually by taking resources offline gracefully. - approved = "Approved" #: Indicates that the repair task has been approved by the Repair Manager and is safe to execute. 
- executing = "Executing" #: Indicates that execution of the repair task is in progress. - restoring = "Restoring" #: Indicates that the Repair Manager is restoring the system to its pre-repair state, usually by bringing resources back online. - completed = "Completed" #: Indicates that the repair task has completed, and no further state changes will occur. - - -class ResultStatus(str, Enum): - - invalid = "Invalid" #: Indicates that the repair task result is invalid. All Service Fabric enumerations have the invalid value. - succeeded = "Succeeded" #: Indicates that the repair task completed execution successfully. - cancelled = "Cancelled" #: Indicates that the repair task was cancelled prior to execution. - interrupted = "Interrupted" #: Indicates that execution of the repair task was interrupted by a cancellation request after some work had already been performed. - failed = "Failed" #: Indicates that there was a failure during execution of the repair task. Some work may have been performed. - pending = "Pending" #: Indicates that the repair task result is not yet available, because the repair task has not finished executing. - - -class RepairTaskHealthCheckState(str, Enum): - - not_started = "NotStarted" #: Indicates that the health check has not started. - in_progress = "InProgress" #: Indicates that the health check is in progress. - succeeded = "Succeeded" #: Indicates that the health check succeeded. - skipped = "Skipped" #: Indicates that the health check was skipped. - timed_out = "TimedOut" #: Indicates that the health check timed out. - - -class ScalingTriggerKind(str, Enum): - - invalid = "Invalid" #: Indicates the scaling trigger is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - average_partition_load = "AveragePartitionLoad" #: Indicates a trigger where scaling decisions are made based on average load of a partition. The value is 1. 
- average_service_load = "AverageServiceLoad" #: Indicates a trigger where scaling decisions are made based on average load of a service. The value is 2. - - -class ScalingMechanismKind(str, Enum): - - invalid = "Invalid" #: Indicates the scaling mechanism is invalid. All Service Fabric enumerations have the invalid type. The value is zero. - partition_instance_count = "PartitionInstanceCount" #: Indicates a mechanism for scaling where new instances are added or removed from a partition. The value is 1. - add_remove_incremental_named_partition = "AddRemoveIncrementalNamedPartition" #: Indicates a mechanism for scaling where new named partitions are added or removed from a service. The value is 2. - - -class ResourceStatus(str, Enum): - - unknown = "Unknown" #: Indicates the resource status is unknown. The value is zero. - ready = "Ready" #: Indicates the resource is ready. The value is 1. - upgrading = "Upgrading" #: Indicates the resource is upgrading. The value is 2. - creating = "Creating" #: Indicates the resource is being created. The value is 3. - deleting = "Deleting" #: Indicates the resource is being deleted. The value is 4. - failed = "Failed" #: Indicates the resource is not functional due to persistent failures. See statusDetails property for more details. The value is 5. - - -class SecretKind(str, Enum): - - inlined_value = "inlinedValue" #: A simple secret resource whose plaintext value is provided by the user. - key_vault_versioned_reference = "keyVaultVersionedReference" #: A secret resource that references a specific version of a secret stored in Azure Key Vault; the expected value is a versioned KeyVault URI corresponding to the version of the secret being referenced. - - -class VolumeProvider(str, Enum): - - sf_azure_file = "SFAzureFile" #: Provides volumes that are backed by Azure Files. 
- - -class SizeTypes(str, Enum): - - small = "Small" - medium = "Medium" - large = "Large" - - -class ApplicationScopedVolumeKind(str, Enum): - - service_fabric_volume_disk = "ServiceFabricVolumeDisk" #: Provides Service Fabric High Availability Volume Disk - - -class NetworkKind(str, Enum): - - local = "Local" #: Indicates a container network local to a single Service Fabric cluster. The value is 1. - - -class HeaderMatchType(str, Enum): - - exact = "exact" - - -class OperatingSystemType(str, Enum): - - linux = "Linux" #: The required operating system is Linux. - windows = "Windows" #: The required operating system is Windows. - - -class ImageRegistryPasswordType(str, Enum): - - clear_text = "ClearText" #: The image registry password in clear text, will not be processed in any way and used directly - key_vault_reference = "KeyVaultReference" #: The URI to a KeyVault secret version, will be resolved using the application's managed identity (this type is only valid if the app was assigned a managed identity) before getting used - secret_value_reference = "SecretValueReference" #: The reference to a SecretValue resource, will be resolved before getting used - - -class EnvironmentVariableType(str, Enum): - - clear_text = "ClearText" #: The environment variable in clear text, will not be processed in any way and passed in as is - key_vault_reference = "KeyVaultReference" #: The URI to a KeyVault secret version, will be resolved using the application's managed identity (this type is only valid if the app was assigned a managed identity) before getting passed in - secret_value_reference = "SecretValueReference" #: The reference to a SecretValue resource, will be resolved before getting passed in - - -class SettingType(str, Enum): - - clear_text = "ClearText" #: The setting in clear text, will not be processed in any way and passed in as is - key_vault_reference = "KeyVaultReference" #: The URI to a KeyVault secret version, will be resolved using the application's managed 
identity (this type is only valid if the app was assigned a managed identity) before getting passed in - secret_value_reference = "SecretValueReference" #: The reference to a SecretValue resource, will be resolved before getting passed in - - -class Scheme(str, Enum): - - http = "http" #: Indicates that the probe is http. - https = "https" #: Indicates that the probe is https. No cert validation. - - -class ApplicationResourceUpgradeState(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is 0. - provisioning_target = "ProvisioningTarget" #: The upgrade is in the progress of provisioning target application type version. The value is 1. - rolling_forward = "RollingForward" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. - unprovisioning_current = "UnprovisioningCurrent" #: The upgrade is in the progress of unprovisioning current application type version and rolling forward to the target version is completed. The value is 3. - completed_rollforward = "CompletedRollforward" #: The upgrade has finished rolling forward. The value is 4. - rolling_back = "RollingBack" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 5. - unprovisioning_target = "UnprovisioningTarget" #: The upgrade is in the progress of unprovisioning target application type version and rolling back to the current version is completed. The value is 6. - completed_rollback = "CompletedRollback" #: The upgrade has finished rolling back. The value is 7. - failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 8. - - -class RollingUpgradeMode(str, Enum): - - invalid = "Invalid" #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. The value is zero. 
- unmonitored_auto = "UnmonitoredAuto" #: The upgrade will proceed automatically without performing any health monitoring. The value is 1 - unmonitored_manual = "UnmonitoredManual" #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually monitor health before proceeding. The value is 2 - monitored = "Monitored" #: The upgrade will stop after completing each upgrade domain and automatically monitor health before proceeding. The value is 3 - - -class DiagnosticsSinkKind(str, Enum): - - invalid = "Invalid" #: Indicates an invalid sink kind. All Service Fabric enumerations have the invalid type. - azure_internal_monitoring_pipeline = "AzureInternalMonitoringPipeline" #: Diagnostics settings for Geneva. - - -class AutoScalingMechanismKind(str, Enum): - - add_remove_replica = "AddRemoveReplica" #: Indicates that scaling should be performed by adding or removing replicas. - - -class AutoScalingMetricKind(str, Enum): - - resource = "Resource" #: Indicates that the metric is one of resources, like cpu or memory. - - -class AutoScalingResourceMetricName(str, Enum): - - cpu = "cpu" #: Indicates that the resource is CPU cores. - memory_in_gb = "memoryInGB" #: Indicates that the resource is memory in GB. - - -class AutoScalingTriggerKind(str, Enum): - - average_load = "AverageLoad" #: Indicates that scaling should be performed based on average load of all replicas in the service. - - -class ExecutionPolicyType(str, Enum): - - default = "Default" #: Indicates the default execution policy, always restart the service if an exit occurs. - run_to_completion = "RunToCompletion" #: Indicates that the service will perform its desired operation and complete successfully. If the service encounters failure, it will restarted based on restart policy specified. If the service completes its operation successfully, it will not be restarted again. 
- - -class RestartPolicy(str, Enum): - - on_failure = "OnFailure" #: Service will be restarted when it encounters a failure. - never = "Never" #: Service will never be restarted. If the service encounters a failure, it will move to Failed state. - - -class NodeStatusFilter(str, Enum): - - default = "default" #: This filter value will match all of the nodes excepts the ones with status as Unknown or Removed. - all = "all" #: This filter value will match all of the nodes. - up = "up" #: This filter value will match nodes that are Up. - down = "down" #: This filter value will match nodes that are Down. - enabling = "enabling" #: This filter value will match nodes that are in the process of being enabled with status as Enabling. - disabling = "disabling" #: This filter value will match nodes that are in the process of being disabled with status as Disabling. - disabled = "disabled" #: This filter value will match nodes that are Disabled. - unknown = "unknown" #: This filter value will match nodes whose status is Unknown. A node would be in Unknown state if Service Fabric does not have authoritative information about that node. This can happen if the system learns about a node at runtime. - removed = "removed" #: This filter value will match nodes whose status is Removed. These are the nodes that are removed from the cluster using the RemoveNodeState API. - - -class ReplicaHealthReportServiceKind(str, Enum): - - stateless = "Stateless" #: Does not use Service Fabric to make its state highly available or reliable. The value is 1 - stateful = "Stateful" #: Uses Service Fabric to make its state or part of its state highly available and reliable. The value is 2. - - -class DataLossMode(str, Enum): - - invalid = "Invalid" #: Reserved. Do not pass into API. - partial_data_loss = "PartialDataLoss" #: PartialDataLoss option will cause a quorum of replicas to go down, triggering an OnDataLoss event in the system for the given partition. 
- full_data_loss = "FullDataLoss" #: FullDataLoss option will drop all the replicas which means that all the data will be lost. - - -class NodeTransitionType(str, Enum): - - invalid = "Invalid" #: Reserved. Do not pass into API. - start = "Start" #: Transition a stopped node to up. - stop = "Stop" #: Transition an up node to stopped. - - -class QuorumLossMode(str, Enum): - - invalid = "Invalid" #: Reserved. Do not pass into API. - quorum_replicas = "QuorumReplicas" #: Partial Quorum loss mode : Minimum number of replicas for a partition will be down that will cause a quorum loss. - all_replicas = "AllReplicas" - - -class RestartPartitionMode(str, Enum): - - invalid = "Invalid" #: Reserved. Do not pass into API. - all_replicas_or_instances = "AllReplicasOrInstances" #: All replicas or instances in the partition are restarted at once. - only_active_secondaries = "OnlyActiveSecondaries" #: Only the secondary replicas are restarted. diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py new file mode 100644 index 000000000000..6507720047ec --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py @@ -0,0 +1,2092 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class ApplicationDefinitionKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The mechanism used to define a Service Fabric application. + """ + + #: Indicates the application definition kind is invalid. All Service Fabric enumerations have the + #: invalid type. The value is 65535. + INVALID = "Invalid" + #: Indicates the application is defined by a Service Fabric application description. The value is + #: 0. + SERVICE_FABRIC_APPLICATION_DESCRIPTION = "ServiceFabricApplicationDescription" + #: Indicates the application is defined by compose file(s). The value is 1. + COMPOSE = "Compose" + +class ApplicationPackageCleanupPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of action that needs to be taken for cleaning up the application package after + successful provision. + """ + + #: Indicates that the application package cleanup policy is invalid. This value is default. The + #: value is zero. + INVALID = "Invalid" + #: Indicates that the cleanup policy of application packages is based on the cluster setting + #: "CleanupApplicationPackageOnProvisionSuccess." The value is 1. + DEFAULT = "Default" + #: Indicates that the service fabric runtime determines when to do the application package + #: cleanup. By default, cleanup is done on successful provision. 
The value is 2. + AUTOMATIC = "Automatic" + #: Indicates that the user has to explicitly clean up the application package. The value is 3. + MANUAL = "Manual" + +class ApplicationResourceUpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The state of the application resource upgrade. + """ + + #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. + #: The value is 0. + INVALID = "Invalid" + #: The upgrade is in the progress of provisioning target application type version. The value is 1. + PROVISIONING_TARGET = "ProvisioningTarget" + #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. + ROLLING_FORWARD = "RollingForward" + #: The upgrade is in the progress of unprovisioning current application type version and rolling + #: forward to the target version is completed. The value is 3. + UNPROVISIONING_CURRENT = "UnprovisioningCurrent" + #: The upgrade has finished rolling forward. The value is 4. + COMPLETED_ROLLFORWARD = "CompletedRollforward" + #: The upgrade is rolling back to the previous version but is not complete yet. The value is 5. + ROLLING_BACK = "RollingBack" + #: The upgrade is in the progress of unprovisioning target application type version and rolling + #: back to the current version is completed. The value is 6. + UNPROVISIONING_TARGET = "UnprovisioningTarget" + #: The upgrade has finished rolling back. The value is 7. + COMPLETED_ROLLBACK = "CompletedRollback" + #: The upgrade has failed and is unable to execute FailureAction. The value is 8. + FAILED = "Failed" + +class ApplicationScopedVolumeKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the application-scoped volume kind. + """ + + #: Provides Service Fabric High Availability Volume Disk. + SERVICE_FABRIC_VOLUME_DISK = "ServiceFabricVolumeDisk" + +class ApplicationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the application. 
+ """ + + #: Indicates the application status is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates the application status is ready. The value is 1. + READY = "Ready" + #: Indicates the application status is upgrading. The value is 2. + UPGRADING = "Upgrading" + #: Indicates the application status is creating. The value is 3. + CREATING = "Creating" + #: Indicates the application status is deleting. The value is 4. + DELETING = "Deleting" + #: Indicates the creation or deletion of application was terminated due to persistent failures. + #: Another create/delete request can be accepted to resume a failed application. The value is 5. + FAILED = "Failed" + +class ApplicationTypeDefinitionKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The mechanism used to define a Service Fabric application type. + """ + + #: Indicates the application type definition kind is invalid. All Service Fabric enumerations have + #: the invalid type. The value is 0. + INVALID = "Invalid" + #: Indicates the application type is defined and created by a Service Fabric application package + #: provided by the user. The value is 1. + SERVICE_FABRIC_APPLICATION_PACKAGE = "ServiceFabricApplicationPackage" + #: Indicates the application type is defined and created implicitly as part of a compose + #: deployment. The value is 2. + COMPOSE = "Compose" + +class ApplicationTypeStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the application type. + """ + + #: Indicates the application type status is invalid. All Service Fabric enumerations have the + #: invalid type. The value is zero. + INVALID = "Invalid" + #: Indicates that the application type is being provisioned in the cluster. The value is 1. + PROVISIONING = "Provisioning" + #: Indicates that the application type is fully provisioned and is available for use. An + #: application of this type and version can be created. 
The value is 2. + AVAILABLE = "Available" + #: Indicates that the application type is in process of being unprovisioned from the cluster. The + #: value is 3. + UNPROVISIONING = "Unprovisioning" + #: Indicates that the application type provisioning failed and it is unavailable for use. The + #: failure details can be obtained from the application type information query. The failed + #: application type information remains in the cluster until it is unprovisioned or reprovisioned + #: successfully. The value is 4. + FAILED = "Failed" + +class AutoScalingMechanismKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the mechanisms for auto scaling. + """ + + #: Indicates that scaling should be performed by adding or removing replicas. + ADD_REMOVE_REPLICA = "AddRemoveReplica" + +class AutoScalingMetricKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the metrics that are used for triggering auto scaling. + """ + + #: Indicates that the metric is one of resources, like cpu or memory. + RESOURCE = "Resource" + +class AutoScalingResourceMetricName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the resources that are used for triggering auto scaling. + """ + + #: Indicates that the resource is CPU cores. + CPU = "cpu" + #: Indicates that the resource is memory in GB. + MEMORY_IN_GB = "memoryInGB" + +class AutoScalingTriggerKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the triggers for auto scaling. + """ + + #: Indicates that scaling should be performed based on average load of all replicas in the + #: service. + AVERAGE_LOAD = "AverageLoad" + +class BackupEntityKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The entity type of a Service Fabric entity such as Application, Service or a Partition where + periodic backups can be enabled. + """ + + #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. 
+ INVALID = "Invalid" + #: Indicates the entity is a Service Fabric partition. + PARTITION = "Partition" + #: Indicates the entity is a Service Fabric service. + SERVICE = "Service" + #: Indicates the entity is a Service Fabric application. + APPLICATION = "Application" + +class BackupPolicyScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the scope at which the backup policy is applied. + """ + + #: Indicates an invalid backup policy scope type. All Service Fabric enumerations have the invalid + #: type. + INVALID = "Invalid" + #: Indicates the backup policy is applied at partition level. Hence overriding any policy which + #: may have applied at partition's service or application level. + PARTITION = "Partition" + #: Indicates the backup policy is applied at service level. All partitions of the service inherit + #: this policy unless explicitly overridden at partition level. + SERVICE = "Service" + #: Indicates the backup policy is applied at application level. All services and partitions of the + #: application inherit this policy unless explicitly overridden at service or partition level. + APPLICATION = "Application" + +class BackupScheduleFrequencyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the frequency with which to run the time based backup schedule. + """ + + #: Indicates an invalid backup schedule frequency type. All Service Fabric enumerations have the + #: invalid type. + INVALID = "Invalid" + #: Indicates that the time based backup schedule is repeated at a daily frequency. + DAILY = "Daily" + #: Indicates that the time based backup schedule is repeated at a weekly frequency. + WEEKLY = "Weekly" + +class BackupScheduleKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of backup schedule, time based or frequency based. + """ + + #: Indicates an invalid backup schedule kind. All Service Fabric enumerations have the invalid + #: type. 
+ INVALID = "Invalid" + #: Indicates a time-based backup schedule. + TIME_BASED = "TimeBased" + #: Indicates a frequency-based backup schedule. + FREQUENCY_BASED = "FrequencyBased" + +class BackupState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Represents the current state of the partition backup operation. + """ + + #: Indicates an invalid backup state. All Service Fabric enumerations have the invalid type. + INVALID = "Invalid" + #: Operation has been validated and accepted. Backup is yet to be triggered. + ACCEPTED = "Accepted" + #: Backup operation has been triggered and is under process. + BACKUP_IN_PROGRESS = "BackupInProgress" + #: Operation completed with success. + SUCCESS = "Success" + #: Operation completed with failure. + FAILURE = "Failure" + #: Operation timed out. + TIMEOUT = "Timeout" + +class BackupStorageKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of backup storage, where backups are saved. + """ + + #: Indicates an invalid backup storage kind. All Service Fabric enumerations have the invalid + #: type. + INVALID = "Invalid" + #: Indicates file/ SMB share to be used as backup storage. + FILE_SHARE = "FileShare" + #: Indicates Azure blob store to be used as backup storage. + AZURE_BLOB_STORE = "AzureBlobStore" + #: Indicates Dsms Azure blob store to be used as backup storage. + DSMS_AZURE_BLOB_STORE = "DsmsAzureBlobStore" + #: Indicates Azure blob store to be used as backup storage using managed identity. + MANAGED_IDENTITY_AZURE_BLOB_STORE = "ManagedIdentityAzureBlobStore" + +class BackupSuspensionScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the scope at which the backup suspension was applied. + """ + + #: Indicates an invalid backup suspension scope type also indicating entity is not suspended. All + #: Service Fabric enumerations have the invalid type. + INVALID = "Invalid" + #: Indicates the backup suspension is applied at partition level. 
+ PARTITION = "Partition" + #: Indicates the backup suspension is applied at service level. All partitions of the service are + #: hence suspended for backup. + SERVICE = "Service" + #: Indicates the backup suspension is applied at application level. All services and partitions of + #: the application are hence suspended for backup. + APPLICATION = "Application" + +class BackupType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the type of backup, whether its full or incremental. + """ + + #: Indicates an invalid backup type. All Service Fabric enumerations have the invalid type. + INVALID = "Invalid" + #: Indicates a full backup. + FULL = "Full" + #: Indicates an incremental backup. A backup chain is comprised of a full backup followed by 0 or + #: more incremental backups. + INCREMENTAL = "Incremental" + +class ChaosEventKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of Chaos event. + """ + + #: Indicates an invalid Chaos event kind. All Service Fabric enumerations have the invalid type. + INVALID = "Invalid" + #: Indicates a Chaos event that gets generated when Chaos is started. + STARTED = "Started" + #: Indicates a Chaos event that gets generated when Chaos has decided on the faults for an + #: iteration. This Chaos event contains the details of the faults as a list of strings. + EXECUTING_FAULTS = "ExecutingFaults" + #: Indicates a Chaos event that gets generated when Chaos is waiting for the cluster to become + #: ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. + WAITING = "Waiting" + #: Indicates a Chaos event that gets generated when the cluster entities do not become stable and + #: healthy within ChaosParameters.MaxClusterStabilizationTimeoutInSeconds. 
+ VALIDATION_FAILED = "ValidationFailed" + #: Indicates a Chaos event that gets generated when an unexpected event has occurred in the Chaos + #: engine, for example, due to the cluster snapshot being inconsistent, while faulting a faultable + #: entity Chaos found that the entity was already faulted. + TEST_ERROR = "TestError" + #: Indicates a Chaos event that gets generated when Chaos stops because either the user issued a + #: stop or the time to run was up. + STOPPED = "Stopped" + +class ChaosScheduleStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Current status of the schedule. + """ + + #: Indicates an invalid Chaos Schedule status. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates that the schedule is stopped and not being used to schedule runs of chaos. The value + #: is one. + STOPPED = "Stopped" + #: Indicates that the schedule is active and is being used to schedule runs of Chaos. The value is + #: two. + ACTIVE = "Active" + #: Indicates that the schedule is expired and will no longer be used to schedule runs of Chaos. + #: The value is three. + EXPIRED = "Expired" + #: Indicates that the schedule is pending and is not yet being used to schedule runs of Chaos but + #: will be used when the start time is passed. The value is four. + PENDING = "Pending" + +class ChaosStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Current status of the Chaos run. + """ + + #: Indicates an invalid Chaos status. All Service Fabric enumerations have the invalid type. The + #: value is zero. + INVALID = "Invalid" + #: Indicates that Chaos is not stopped. The value is one. + RUNNING = "Running" + #: Indicates that Chaos is not scheduling further faults. The value is two. + STOPPED = "Stopped" + +class ComposeDeploymentStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the compose deployment. 
+ """ + + #: Indicates that the compose deployment status is invalid. The value is zero. + INVALID = "Invalid" + #: Indicates that the compose deployment is being provisioned in background. The value is 1. + PROVISIONING = "Provisioning" + #: Indicates that the compose deployment is being created in background. The value is 2. + CREATING = "Creating" + #: Indicates that the compose deployment has been successfully created or upgraded. The value is + #: 3. + READY = "Ready" + #: Indicates that the compose deployment is being unprovisioned in background. The value is 4. + UNPROVISIONING = "Unprovisioning" + #: Indicates that the compose deployment is being deleted in background. The value is 5. + DELETING = "Deleting" + #: Indicates that the compose deployment was terminated due to persistent failures. The value is + #: 6. + FAILED = "Failed" + #: Indicates that the compose deployment is being upgraded in the background. The value is 7. + UPGRADING = "Upgrading" + +class ComposeDeploymentUpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The state of the compose deployment upgrade. + """ + + #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The upgrade is in the progress of provisioning target application type version. The value is 1. + PROVISIONING_TARGET = "ProvisioningTarget" + #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. + ROLLING_FORWARD_IN_PROGRESS = "RollingForwardInProgress" + #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an + #: explicit move next request in UnmonitoredManual mode or performing health checks in Monitored + #: mode. The value is 3. + ROLLING_FORWARD_PENDING = "RollingForwardPending" + #: The upgrade is in the progress of unprovisioning current application type version and rolling + #: forward to the target version is completed. 
The value is 4. + UNPROVISIONING_CURRENT = "UnprovisioningCurrent" + #: The upgrade has finished rolling forward. The value is 5. + ROLLING_FORWARD_COMPLETED = "RollingForwardCompleted" + #: The upgrade is rolling back to the previous version but is not complete yet. The value is 6. + ROLLING_BACK_IN_PROGRESS = "RollingBackInProgress" + #: The upgrade is in the progress of unprovisioning target application type version and rolling + #: back to the current version is completed. The value is 7. + UNPROVISIONING_TARGET = "UnprovisioningTarget" + #: The upgrade has finished rolling back. The value is 8. + ROLLING_BACK_COMPLETED = "RollingBackCompleted" + #: The upgrade has failed and is unable to execute FailureAction. The value is 9. + FAILED = "Failed" + +class CreateFabricDump(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specify True to create a dump of the fabric node process. This is case-sensitive. + """ + + FALSE = "False" + TRUE = "True" + +class DataLossMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Reserved. Do not pass into API. + INVALID = "Invalid" + #: PartialDataLoss option will cause a quorum of replicas to go down, triggering an OnDataLoss + #: event in the system for the given partition. + PARTIAL_DATA_LOSS = "PartialDataLoss" + #: FullDataLoss option will drop all the replicas which means that all the data will be lost. + FULL_DATA_LOSS = "FullDataLoss" + +class DayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the days in a week. + """ + + #: Indicates the Day referred is Sunday. + SUNDAY = "Sunday" + #: Indicates the Day referred is Monday. + MONDAY = "Monday" + #: Indicates the Day referred is Tuesday. + TUESDAY = "Tuesday" + #: Indicates the Day referred is Wednesday. + WEDNESDAY = "Wednesday" + #: Indicates the Day referred is Thursday. + THURSDAY = "Thursday" + #: Indicates the Day referred is Friday. + FRIDAY = "Friday" + #: Indicates the Day referred is Saturday. 
+ SATURDAY = "Saturday" + +class DeactivationIntent(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the intent or reason for deactivating the node. The possible values are following. + """ + + #: Indicates that the node should be paused. The value is 1. + PAUSE = "Pause" + #: Indicates that the intent is for the node to be restarted after a short period of time. The + #: value is 2. + RESTART = "Restart" + #: Indicates the intent is for the node to remove data. The value is 3. + REMOVE_DATA = "RemoveData" + +class DeployedApplicationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the application deployed on the node. Following are the possible values. + """ + + #: Indicates that deployment status is not valid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates that the package is downloading from the ImageStore. The value is 1. + DOWNLOADING = "Downloading" + #: Indicates that the package is activating. The value is 2. + ACTIVATING = "Activating" + #: Indicates that the package is active. The value is 3. + ACTIVE = "Active" + #: Indicates that the package is upgrading. The value is 4. + UPGRADING = "Upgrading" + #: Indicates that the package is deactivating. The value is 5. + DEACTIVATING = "Deactivating" + +class DeploymentStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the status of a deployed application or service package on a Service Fabric node. + """ + + #: Indicates status of the application or service package is not known or invalid. The value is 0. + INVALID = "Invalid" + #: Indicates the application or service package is being downloaded to the node from the + #: ImageStore. The value is 1. + DOWNLOADING = "Downloading" + #: Indicates the application or service package is being activated. The value is 2. + ACTIVATING = "Activating" + #: Indicates the application or service package is active the node. 
The value is 3. + ACTIVE = "Active" + #: Indicates the application or service package is being upgraded. The value is 4. + UPGRADING = "Upgrading" + #: Indicates the application or service package is being deactivated. The value is 5. + DEACTIVATING = "Deactivating" + #: Indicates the application or service package has ran to completion successfully. The value is + #: 6. + RAN_TO_COMPLETION = "RanToCompletion" + #: Indicates the application or service package has failed to run to completion. The value is 7. + FAILED = "Failed" + +class DiagnosticsSinkKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of DiagnosticsSink. + """ + + #: Indicates an invalid sink kind. All Service Fabric enumerations have the invalid type. + INVALID = "Invalid" + #: Diagnostics settings for Geneva. + AZURE_INTERNAL_MONITORING_PIPELINE = "AzureInternalMonitoringPipeline" + +class EntityKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The entity type of a Service Fabric entity such as Cluster, Node, Application, Service, + Partition, Replica etc. + """ + + #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. The + #: value is zero. + INVALID = "Invalid" + #: Indicates the entity is a Service Fabric node. The value is 1. + NODE = "Node" + #: Indicates the entity is a Service Fabric partition. The value is 2. + PARTITION = "Partition" + #: Indicates the entity is a Service Fabric service. The value is 3. + SERVICE = "Service" + #: Indicates the entity is a Service Fabric application. The value is 4. + APPLICATION = "Application" + #: Indicates the entity is a Service Fabric replica. The value is 5. + REPLICA = "Replica" + #: Indicates the entity is a Service Fabric deployed application. The value is 6. + DEPLOYED_APPLICATION = "DeployedApplication" + #: Indicates the entity is a Service Fabric deployed service package. The value is 7. 
+ DEPLOYED_SERVICE_PACKAGE = "DeployedServicePackage" + #: Indicates the entity is a Service Fabric cluster. The value is 8. + CLUSTER = "Cluster" + +class EntryPointStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the status of the code package entry point deployed on a Service Fabric node. + """ + + #: Indicates status of entry point is not known or invalid. The value is 0. + INVALID = "Invalid" + #: Indicates the entry point is scheduled to be started. The value is 1. + PENDING = "Pending" + #: Indicates the entry point is being started. The value is 2. + STARTING = "Starting" + #: Indicates the entry point was started successfully and is running. The value is 3. + STARTED = "Started" + #: Indicates the entry point is being stopped. The value is 4. + STOPPING = "Stopping" + #: Indicates the entry point is not running. The value is 5. + STOPPED = "Stopped" + +class EnvironmentVariableType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the environment variable being given in value + """ + + #: The environment variable in clear text, will not be processed in any way and passed in as is. + CLEAR_TEXT = "ClearText" + #: The URI to a KeyVault secret version, will be resolved using the application's managed identity + #: (this type is only valid if the app was assigned a managed identity) before getting passed in. + KEY_VAULT_REFERENCE = "KeyVaultReference" + #: The reference to a SecretValue resource, will be resolved before getting passed in. + SECRET_VALUE_REFERENCE = "SecretValueReference" + +class ExecutionPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the execution policy types for services. + """ + + #: Indicates the default execution policy, always restart the service if an exit occurs. + DEFAULT = "Default" + #: Indicates that the service will perform its desired operation and complete successfully. 
If the + #: service encounters failure, it will restarted based on restart policy specified. If the service + #: completes its operation successfully, it will not be restarted again. + RUN_TO_COMPLETION = "RunToCompletion" + +class FabricErrorCodes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Defines the fabric error codes that be returned as part of the error object in response to + Service Fabric API operations that are not successful. Following are the error code values that + can be returned for a specific HTTP status code. + + + * + Possible values of the error code for HTTP status code 400 (Bad Request) + + + * "FABRIC_E_INVALID_PARTITION_KEY" + * "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + * "FABRIC_E_INVALID_ADDRESS" + * "FABRIC_E_APPLICATION_NOT_UPGRADING" + * "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + * "FABRIC_E_FABRIC_NOT_UPGRADING" + * "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + * "FABRIC_E_INVALID_CONFIGURATION" + * "FABRIC_E_INVALID_NAME_URI" + * "FABRIC_E_PATH_TOO_LONG" + * "FABRIC_E_KEY_TOO_LARGE" + * "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + * "FABRIC_E_INVALID_ATOMIC_GROUP" + * "FABRIC_E_VALUE_EMPTY" + * "FABRIC_E_BACKUP_IS_ENABLED" + * "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + * "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + * "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + * "E_INVALIDARG" + + * + Possible values of the error code for HTTP status code 404 (Not Found) + + + * "FABRIC_E_NODE_NOT_FOUND" + * "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + * "FABRIC_E_APPLICATION_NOT_FOUND" + * "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + * "FABRIC_E_SERVICE_DOES_NOT_EXIST" + * "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + * "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + * "FABRIC_E_PARTITION_NOT_FOUND" + * "FABRIC_E_REPLICA_DOES_NOT_EXIST" + * "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + * "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + * "FABRIC_E_DIRECTORY_NOT_FOUND" + * "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + * "FABRIC_E_FILE_NOT_FOUND" 
+ * "FABRIC_E_NAME_DOES_NOT_EXIST" + * "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + * "FABRIC_E_ENUMERATION_COMPLETED" + * "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + * "FABRIC_E_KEY_NOT_FOUND" + * "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + * "FABRIC_E_BACKUP_NOT_ENABLED" + * "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + * "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + * "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + + * + Possible values of the error code for HTTP status code 409 (Conflict) + + + * "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + * "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + * "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + * "FABRIC_E_SERVICE_ALREADY_EXISTS" + * "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + * "FABRIC_E_APPLICATION_TYPE_IN_USE" + * "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + * "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + * "FABRIC_E_FABRIC_VERSION_IN_USE" + * "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + * "FABRIC_E_NAME_ALREADY_EXISTS" + * "FABRIC_E_NAME_NOT_EMPTY" + * "FABRIC_E_PROPERTY_CHECK_FAILED" + * "FABRIC_E_SERVICE_METADATA_MISMATCH" + * "FABRIC_E_SERVICE_TYPE_MISMATCH" + * "FABRIC_E_HEALTH_STALE_REPORT" + * "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + * "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + * "FABRIC_E_INSTANCE_ID_MISMATCH" + * "FABRIC_E_BACKUP_IN_PROGRESS" + * "FABRIC_E_RESTORE_IN_PROGRESS" + * "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + + * + Possible values of the error code for HTTP status code 413 (Request Entity Too Large) + + + * "FABRIC_E_VALUE_TOO_LARGE" + + * + Possible values of the error code for HTTP status code 500 (Internal Server Error) + + + * "FABRIC_E_NODE_IS_UP" + * "E_FAIL" + * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + * "FABRIC_E_VOLUME_ALREADY_EXISTS" + * "FABRIC_E_VOLUME_NOT_FOUND" + * "SerializationError" + + * + Possible values of the error code for HTTP status code 
503 (Service Unavailable) + + + * "FABRIC_E_NO_WRITE_QUORUM" + * "FABRIC_E_NOT_PRIMARY" + * "FABRIC_E_NOT_READY" + * "FABRIC_E_RECONFIGURATION_PENDING" + * "FABRIC_E_SERVICE_OFFLINE" + * "E_ABORT" + * "FABRIC_E_VALUE_TOO_LARGE" + + * + Possible values of the error code for HTTP status code 504 (Gateway Timeout) + + + * "FABRIC_E_COMMUNICATION_ERROR" + * "FABRIC_E_OPERATION_NOT_COMPLETE" + * "FABRIC_E_TIMEOUT" + """ + + FABRIC_E_INVALID_PARTITION_KEY = "FABRIC_E_INVALID_PARTITION_KEY" + FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR = "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + FABRIC_E_INVALID_ADDRESS = "FABRIC_E_INVALID_ADDRESS" + FABRIC_E_APPLICATION_NOT_UPGRADING = "FABRIC_E_APPLICATION_NOT_UPGRADING" + FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR = "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + FABRIC_E_FABRIC_NOT_UPGRADING = "FABRIC_E_FABRIC_NOT_UPGRADING" + FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR = "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + FABRIC_E_INVALID_CONFIGURATION = "FABRIC_E_INVALID_CONFIGURATION" + FABRIC_E_INVALID_NAME_URI = "FABRIC_E_INVALID_NAME_URI" + FABRIC_E_PATH_TOO_LONG = "FABRIC_E_PATH_TOO_LONG" + FABRIC_E_KEY_TOO_LARGE = "FABRIC_E_KEY_TOO_LARGE" + FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED = "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + FABRIC_E_INVALID_ATOMIC_GROUP = "FABRIC_E_INVALID_ATOMIC_GROUP" + FABRIC_E_VALUE_EMPTY = "FABRIC_E_VALUE_EMPTY" + FABRIC_E_NODE_NOT_FOUND = "FABRIC_E_NODE_NOT_FOUND" + FABRIC_E_APPLICATION_TYPE_NOT_FOUND = "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + FABRIC_E_APPLICATION_NOT_FOUND = "FABRIC_E_APPLICATION_NOT_FOUND" + FABRIC_E_SERVICE_TYPE_NOT_FOUND = "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + FABRIC_E_SERVICE_DOES_NOT_EXIST = "FABRIC_E_SERVICE_DOES_NOT_EXIST" + FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND = "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND = "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + FABRIC_E_PARTITION_NOT_FOUND = "FABRIC_E_PARTITION_NOT_FOUND" + 
FABRIC_E_REPLICA_DOES_NOT_EXIST = "FABRIC_E_REPLICA_DOES_NOT_EXIST" + FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST = "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND = "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + FABRIC_E_DIRECTORY_NOT_FOUND = "FABRIC_E_DIRECTORY_NOT_FOUND" + FABRIC_E_FABRIC_VERSION_NOT_FOUND = "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + FABRIC_E_FILE_NOT_FOUND = "FABRIC_E_FILE_NOT_FOUND" + FABRIC_E_NAME_DOES_NOT_EXIST = "FABRIC_E_NAME_DOES_NOT_EXIST" + FABRIC_E_PROPERTY_DOES_NOT_EXIST = "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + FABRIC_E_ENUMERATION_COMPLETED = "FABRIC_E_ENUMERATION_COMPLETED" + FABRIC_E_SERVICE_MANIFEST_NOT_FOUND = "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + FABRIC_E_KEY_NOT_FOUND = "FABRIC_E_KEY_NOT_FOUND" + FABRIC_E_HEALTH_ENTITY_NOT_FOUND = "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS = "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + FABRIC_E_APPLICATION_ALREADY_EXISTS = "FABRIC_E_APPLICATION_ALREADY_EXISTS" + FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION = "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS = "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS = "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + FABRIC_E_SERVICE_ALREADY_EXISTS = "FABRIC_E_SERVICE_ALREADY_EXISTS" + FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS = "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + FABRIC_E_APPLICATION_TYPE_IN_USE = "FABRIC_E_APPLICATION_TYPE_IN_USE" + FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION = "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS = "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + FABRIC_E_FABRIC_VERSION_IN_USE = "FABRIC_E_FABRIC_VERSION_IN_USE" + FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS = "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + FABRIC_E_NAME_ALREADY_EXISTS = "FABRIC_E_NAME_ALREADY_EXISTS" + FABRIC_E_NAME_NOT_EMPTY = "FABRIC_E_NAME_NOT_EMPTY" + FABRIC_E_PROPERTY_CHECK_FAILED = 
"FABRIC_E_PROPERTY_CHECK_FAILED" + FABRIC_E_SERVICE_METADATA_MISMATCH = "FABRIC_E_SERVICE_METADATA_MISMATCH" + FABRIC_E_SERVICE_TYPE_MISMATCH = "FABRIC_E_SERVICE_TYPE_MISMATCH" + FABRIC_E_HEALTH_STALE_REPORT = "FABRIC_E_HEALTH_STALE_REPORT" + FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED = "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + FABRIC_E_NODE_HAS_NOT_STOPPED_YET = "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + FABRIC_E_INSTANCE_ID_MISMATCH = "FABRIC_E_INSTANCE_ID_MISMATCH" + FABRIC_E_VALUE_TOO_LARGE = "FABRIC_E_VALUE_TOO_LARGE" + FABRIC_E_NO_WRITE_QUORUM = "FABRIC_E_NO_WRITE_QUORUM" + FABRIC_E_NOT_PRIMARY = "FABRIC_E_NOT_PRIMARY" + FABRIC_E_NOT_READY = "FABRIC_E_NOT_READY" + FABRIC_E_RECONFIGURATION_PENDING = "FABRIC_E_RECONFIGURATION_PENDING" + FABRIC_E_SERVICE_OFFLINE = "FABRIC_E_SERVICE_OFFLINE" + E_ABORT = "E_ABORT" + FABRIC_E_COMMUNICATION_ERROR = "FABRIC_E_COMMUNICATION_ERROR" + FABRIC_E_OPERATION_NOT_COMPLETE = "FABRIC_E_OPERATION_NOT_COMPLETE" + FABRIC_E_TIMEOUT = "FABRIC_E_TIMEOUT" + FABRIC_E_NODE_IS_UP = "FABRIC_E_NODE_IS_UP" + E_FAIL = "E_FAIL" + FABRIC_E_BACKUP_IS_ENABLED = "FABRIC_E_BACKUP_IS_ENABLED" + FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH = "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + FABRIC_E_INVALID_FOR_STATELESS_SERVICES = "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + FABRIC_E_BACKUP_NOT_ENABLED = "FABRIC_E_BACKUP_NOT_ENABLED" + FABRIC_E_BACKUP_POLICY_NOT_EXISTING = "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING = "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + FABRIC_E_BACKUP_IN_PROGRESS = "FABRIC_E_BACKUP_IN_PROGRESS" + FABRIC_E_RESTORE_IN_PROGRESS = "FABRIC_E_RESTORE_IN_PROGRESS" + FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING = "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + FABRIC_E_INVALID_SERVICE_SCALING_POLICY = "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + E_INVALIDARG = "E_INVALIDARG" + FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + 
FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + FABRIC_E_VOLUME_ALREADY_EXISTS = "FABRIC_E_VOLUME_ALREADY_EXISTS" + FABRIC_E_VOLUME_NOT_FOUND = "FABRIC_E_VOLUME_NOT_FOUND" + SERIALIZATION_ERROR = "SerializationError" + FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR = "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + +class FabricEventKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of FabricEvent. + """ + + CLUSTER_EVENT = "ClusterEvent" + CONTAINER_INSTANCE_EVENT = "ContainerInstanceEvent" + NODE_EVENT = "NodeEvent" + APPLICATION_EVENT = "ApplicationEvent" + SERVICE_EVENT = "ServiceEvent" + PARTITION_EVENT = "PartitionEvent" + REPLICA_EVENT = "ReplicaEvent" + PARTITION_ANALYSIS_EVENT = "PartitionAnalysisEvent" + APPLICATION_CREATED = "ApplicationCreated" + APPLICATION_DELETED = "ApplicationDeleted" + APPLICATION_NEW_HEALTH_REPORT = "ApplicationNewHealthReport" + APPLICATION_HEALTH_REPORT_EXPIRED = "ApplicationHealthReportExpired" + APPLICATION_UPGRADE_COMPLETED = "ApplicationUpgradeCompleted" + APPLICATION_UPGRADE_DOMAIN_COMPLETED = "ApplicationUpgradeDomainCompleted" + APPLICATION_UPGRADE_ROLLBACK_COMPLETED = "ApplicationUpgradeRollbackCompleted" + APPLICATION_UPGRADE_ROLLBACK_STARTED = "ApplicationUpgradeRollbackStarted" + APPLICATION_UPGRADE_STARTED = "ApplicationUpgradeStarted" + DEPLOYED_APPLICATION_NEW_HEALTH_REPORT = "DeployedApplicationNewHealthReport" + DEPLOYED_APPLICATION_HEALTH_REPORT_EXPIRED = "DeployedApplicationHealthReportExpired" + APPLICATION_PROCESS_EXITED = "ApplicationProcessExited" + APPLICATION_CONTAINER_INSTANCE_EXITED = "ApplicationContainerInstanceExited" + NODE_ABORTED = "NodeAborted" + NODE_ADDED_TO_CLUSTER = "NodeAddedToCluster" + NODE_CLOSED = "NodeClosed" + NODE_DEACTIVATE_COMPLETED = "NodeDeactivateCompleted" + NODE_DEACTIVATE_STARTED = "NodeDeactivateStarted" + NODE_DOWN = "NodeDown" + NODE_NEW_HEALTH_REPORT = "NodeNewHealthReport" + 
NODE_HEALTH_REPORT_EXPIRED = "NodeHealthReportExpired" + NODE_OPEN_SUCCEEDED = "NodeOpenSucceeded" + NODE_OPEN_FAILED = "NodeOpenFailed" + NODE_REMOVED_FROM_CLUSTER = "NodeRemovedFromCluster" + NODE_UP = "NodeUp" + PARTITION_NEW_HEALTH_REPORT = "PartitionNewHealthReport" + PARTITION_HEALTH_REPORT_EXPIRED = "PartitionHealthReportExpired" + PARTITION_RECONFIGURED = "PartitionReconfigured" + PARTITION_PRIMARY_MOVE_ANALYSIS = "PartitionPrimaryMoveAnalysis" + SERVICE_CREATED = "ServiceCreated" + SERVICE_DELETED = "ServiceDeleted" + SERVICE_NEW_HEALTH_REPORT = "ServiceNewHealthReport" + SERVICE_HEALTH_REPORT_EXPIRED = "ServiceHealthReportExpired" + DEPLOYED_SERVICE_PACKAGE_NEW_HEALTH_REPORT = "DeployedServicePackageNewHealthReport" + DEPLOYED_SERVICE_PACKAGE_HEALTH_REPORT_EXPIRED = "DeployedServicePackageHealthReportExpired" + STATEFUL_REPLICA_NEW_HEALTH_REPORT = "StatefulReplicaNewHealthReport" + STATEFUL_REPLICA_HEALTH_REPORT_EXPIRED = "StatefulReplicaHealthReportExpired" + STATELESS_REPLICA_NEW_HEALTH_REPORT = "StatelessReplicaNewHealthReport" + STATELESS_REPLICA_HEALTH_REPORT_EXPIRED = "StatelessReplicaHealthReportExpired" + CLUSTER_NEW_HEALTH_REPORT = "ClusterNewHealthReport" + CLUSTER_HEALTH_REPORT_EXPIRED = "ClusterHealthReportExpired" + CLUSTER_UPGRADE_COMPLETED = "ClusterUpgradeCompleted" + CLUSTER_UPGRADE_DOMAIN_COMPLETED = "ClusterUpgradeDomainCompleted" + CLUSTER_UPGRADE_ROLLBACK_COMPLETED = "ClusterUpgradeRollbackCompleted" + CLUSTER_UPGRADE_ROLLBACK_STARTED = "ClusterUpgradeRollbackStarted" + CLUSTER_UPGRADE_STARTED = "ClusterUpgradeStarted" + CHAOS_STOPPED = "ChaosStopped" + CHAOS_STARTED = "ChaosStarted" + CHAOS_CODE_PACKAGE_RESTART_SCHEDULED = "ChaosCodePackageRestartScheduled" + CHAOS_REPLICA_REMOVAL_SCHEDULED = "ChaosReplicaRemovalScheduled" + CHAOS_PARTITION_SECONDARY_MOVE_SCHEDULED = "ChaosPartitionSecondaryMoveScheduled" + CHAOS_PARTITION_PRIMARY_MOVE_SCHEDULED = "ChaosPartitionPrimaryMoveScheduled" + CHAOS_REPLICA_RESTART_SCHEDULED = 
"ChaosReplicaRestartScheduled" + CHAOS_NODE_RESTART_SCHEDULED = "ChaosNodeRestartScheduled" + +class FabricReplicaStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the status of the replica. + """ + + #: Indicates that the read or write operation access status is not valid. This value is not + #: returned to the caller. + INVALID = "Invalid" + #: Indicates that the replica is down. + DOWN = "Down" + #: Indicates that the replica is up. + UP = "Up" + +class FailureAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The compensating action to perform when a Monitored upgrade encounters monitoring policy or + health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will start + rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. + """ + + #: Indicates the failure action is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The upgrade will start rolling back automatically. The value is 1. + ROLLBACK = "Rollback" + #: The upgrade will switch to UnmonitoredManual upgrade mode. The value is 2. + MANUAL = "Manual" + +class FailureReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The cause of an upgrade failure that resulted in FailureAction being executed. + """ + + #: Indicates the reason is invalid or unknown. All Service Fabric enumerations have the invalid + #: type. The value is zero. + NONE = "None" + #: There was an external request to roll back the upgrade. The value is 1. + INTERRUPTED = "Interrupted" + #: The upgrade failed due to health policy violations. The value is 2. + HEALTH_CHECK = "HealthCheck" + #: An upgrade domain took longer than the allowed upgrade domain timeout to process. The value is + #: 3. 
+ UPGRADE_DOMAIN_TIMEOUT = "UpgradeDomainTimeout" + #: The overall upgrade took longer than the allowed upgrade timeout to process. The value is 4. + OVERALL_UPGRADE_TIMEOUT = "OverallUpgradeTimeout" + +class HeaderMatchType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """how to match header value + """ + + EXACT = "exact" + +class HealthEvaluationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The health manager in the cluster performs health evaluations in determining the aggregated + health state of an entity. This enumeration provides information on the kind of evaluation that + was performed. Following are the possible values. + """ + + #: Indicates that the health evaluation is invalid. The value is zero. + INVALID = "Invalid" + #: Indicates that the health evaluation is for a health event. The value is 1. + EVENT = "Event" + #: Indicates that the health evaluation is for the replicas of a partition. The value is 2. + REPLICAS = "Replicas" + #: Indicates that the health evaluation is for the partitions of a service. The value is 3. + PARTITIONS = "Partitions" + #: Indicates that the health evaluation is for the deployed service packages of a deployed + #: application. The value is 4. + DEPLOYED_SERVICE_PACKAGES = "DeployedServicePackages" + #: Indicates that the health evaluation is for the deployed applications of an application. The + #: value is 5. + DEPLOYED_APPLICATIONS = "DeployedApplications" + #: Indicates that the health evaluation is for services of an application. The value is 6. + SERVICES = "Services" + #: Indicates that the health evaluation is for the cluster nodes. The value is 7. + NODES = "Nodes" + #: Indicates that the health evaluation is for the cluster applications. The value is 8. + APPLICATIONS = "Applications" + #: Indicates that the health evaluation is for the system application. The value is 9. 
+ SYSTEM_APPLICATION = "SystemApplication" + #: Indicates that the health evaluation is for the deployed applications of an application in an + #: upgrade domain. The value is 10. + UPGRADE_DOMAIN_DEPLOYED_APPLICATIONS = "UpgradeDomainDeployedApplications" + #: Indicates that the health evaluation is for the cluster nodes in an upgrade domain. The value + #: is 11. + UPGRADE_DOMAIN_NODES = "UpgradeDomainNodes" + #: Indicates that the health evaluation is for a replica. The value is 13. + REPLICA = "Replica" + #: Indicates that the health evaluation is for a partition. The value is 14. + PARTITION = "Partition" + #: Indicates that the health evaluation is for a deployed service package. The value is 16. + DEPLOYED_SERVICE_PACKAGE = "DeployedServicePackage" + #: Indicates that the health evaluation is for a deployed application. The value is 17. + DEPLOYED_APPLICATION = "DeployedApplication" + #: Indicates that the health evaluation is for a service. The value is 15. + SERVICE = "Service" + #: Indicates that the health evaluation is for a node. The value is 12. + NODE = "Node" + #: Indicates that the health evaluation is for an application. The value is 18. + APPLICATION = "Application" + #: Indicates that the health evaluation is for the delta of unhealthy cluster nodes. The value is + #: 19. + DELTA_NODES_CHECK = "DeltaNodesCheck" + #: Indicates that the health evaluation is for the delta of unhealthy upgrade domain cluster + #: nodes. The value is 20. + UPGRADE_DOMAIN_DELTA_NODES_CHECK = "UpgradeDomainDeltaNodesCheck" + #: – Indicates that the health evaluation is for applications of an application type. The value is + #: 21. + APPLICATION_TYPE_APPLICATIONS = "ApplicationTypeApplications" + #: – Indicates that the health evaluation is for nodes of a node type. The value is 22. 
+ NODE_TYPE_NODES = "NodeTypeNodes" + +class HealthState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The health state of a Service Fabric entity such as Cluster, Node, Application, Service, + Partition, Replica etc. + """ + + #: Indicates an invalid health state. All Service Fabric enumerations have the invalid type. The + #: value is zero. + INVALID = "Invalid" + #: Indicates the health state is okay. The value is 1. + OK = "Ok" + #: Indicates the health state is at a warning level. The value is 2. + WARNING = "Warning" + #: Indicates the health state is at an error level. Error health state should be investigated, as + #: they can impact the correct functionality of the cluster. The value is 3. + ERROR = "Error" + #: Indicates an unknown health status. The value is 65535. + UNKNOWN = "Unknown" + +class HostIsolationMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the isolation mode of main entry point of a code package when it's host type is + ContainerHost. This is specified as part of container host policies in application manifest + while importing service manifest. + """ + + #: Indicates the isolation mode is not applicable for given HostType. The value is 0. + NONE = "None" + #: This is the default isolation mode for a ContainerHost. The value is 1. + PROCESS = "Process" + #: Indicates the ContainerHost is a Hyper-V container. This applies to only Windows containers. + #: The value is 2. + HYPER_V = "HyperV" + +class HostOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """choices for server host + """ + + #: host: http://localhost:19080/. + HTTP_LOCALHOST19080_ = "http://localhost:19080/" + #: host: https://localhost:19080/. + HTTPS_LOCALHOST19080_ = "https://localhost:19080/" + +class HostType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the type of host for main entry point of a code package as specified in service + manifest. 
+ """ + + #: Indicates the type of host is not known or invalid. The value is 0. + INVALID = "Invalid" + #: Indicates the host is an executable. The value is 1. + EXE_HOST = "ExeHost" + #: Indicates the host is a container. The value is 2. + CONTAINER_HOST = "ContainerHost" + +class ImageRegistryPasswordType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the image registry password being given in password + """ + + #: The image registry password in clear text, will not be processed in any way and used directly. + CLEAR_TEXT = "ClearText" + #: The URI to a KeyVault secret version, will be resolved using the application's managed identity + #: (this type is only valid if the app was assigned a managed identity) before getting used. + KEY_VAULT_REFERENCE = "KeyVaultReference" + #: The reference to a SecretValue resource, will be resolved before getting used. + SECRET_VALUE_REFERENCE = "SecretValueReference" + +class ImpactLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The level of impact expected. + """ + + INVALID = "Invalid" + NONE = "None" + RESTART = "Restart" + REMOVE_DATA = "RemoveData" + REMOVE_NODE = "RemoveNode" + +class ManagedIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of managed identity to be used to connect to Azure Blob Store via Managed Identity. + """ + + #: Indicates an invalid managed identity type. All Service Fabric enumerations have the invalid + #: type. + INVALID = "Invalid" + #: Indicates VMSS managed identity should be used to connect to Azure blob store. + VMSS = "VMSS" + #: Indicates cluster managed identity should be used to connect to Azure blob store. + CLUSTER = "Cluster" + +class MoveCost(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the move cost for the service. + """ + + #: Zero move cost. This value is zero. + ZERO = "Zero" + #: Specifies the move cost of the service as Low. The value is 1. 
+ LOW = "Low" + #: Specifies the move cost of the service as Medium. The value is 2. + MEDIUM = "Medium" + #: Specifies the move cost of the service as High. The value is 3. + HIGH = "High" + #: Specifies the move cost of the service as VeryHigh. The value is 4. + VERY_HIGH = "VeryHigh" + +class NetworkKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of a Service Fabric container network. + """ + + #: Indicates a container network local to a single Service Fabric cluster. The value is 1. + LOCAL = "Local" + +class NodeDeactivationIntent(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The intent or the reason for deactivating the node. Following are the possible values for it. + """ + + #: Indicates the node deactivation intent is invalid. All Service Fabric enumerations have the + #: invalid type. The value is zero. This value is not used. + INVALID = "Invalid" + #: Indicates that the node should be paused. The value is 1. + PAUSE = "Pause" + #: Indicates that the intent is for the node to be restarted after a short period of time. Service + #: Fabric does not restart the node, this action is done outside of Service Fabric. The value is + #: 2. + RESTART = "Restart" + #: Indicates that the intent is to reimage the node. Service Fabric does not reimage the node, + #: this action is done outside of Service Fabric. The value is 3. + REMOVE_DATA = "RemoveData" + #: Indicates that the node is being decommissioned and is not expected to return. Service Fabric + #: does not decommission the node, this action is done outside of Service Fabric. The value is 4. + REMOVE_NODE = "RemoveNode" + +class NodeDeactivationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of node deactivation operation. Following are the possible values. + """ + + #: No status is associated with the task. The value is zero. 
+ NONE = "None" + #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe + #: to proceed to ensure availability of the service and reliability of the state. This value + #: indicates that one or more safety checks are in progress. The value is 1. + SAFETY_CHECK_IN_PROGRESS = "SafetyCheckInProgress" + #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe + #: to proceed to ensure availability of the service and reliability of the state. This value + #: indicates that all safety checks have been completed. The value is 2. + SAFETY_CHECK_COMPLETE = "SafetyCheckComplete" + #: The task is completed. The value is 3. + COMPLETED = "Completed" + +class NodeDeactivationTaskType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the task that performed the node deactivation. Following are the possible values. + """ + + #: Indicates the node deactivation task type is invalid. All Service Fabric enumerations have the + #: invalid type. The value is zero. This value is not used. + INVALID = "Invalid" + #: Specifies the task created by Infrastructure hosting the nodes. The value is 1. + INFRASTRUCTURE = "Infrastructure" + #: Specifies the task that was created by the Repair Manager service. The value is 2. + REPAIR = "Repair" + #: Specifies that the task was created by using the public API. The value is 3. + CLIENT = "Client" + +class NodeStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the node. + """ + + #: Indicates the node status is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: Indicates the node is up. The value is 1. + UP = "Up" + #: Indicates the node is down. The value is 2. + DOWN = "Down" + #: Indicates the node is in process of being enabled. The value is 3. + ENABLING = "Enabling" + #: Indicates the node is in the process of being disabled. The value is 4. 
+ DISABLING = "Disabling" + #: Indicates the node is disabled. The value is 5. + DISABLED = "Disabled" + #: Indicates the node is unknown. A node would be in Unknown state if Service Fabric does not have + #: authoritative information about that node. This can happen if the system learns about a node at + #: runtime.The value is 6. + UNKNOWN = "Unknown" + #: Indicates the node is removed. A node would be in Removed state if NodeStateRemoved API has + #: been called for this node. In other words, Service Fabric has been informed that the persisted + #: state on the node has been permanently lost. The value is 7. + REMOVED = "Removed" + +class NodeStatusFilter(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: This filter value will match all of the nodes excepts the ones with status as Unknown or + #: Removed. + DEFAULT = "default" + #: This filter value will match all of the nodes. + ALL = "all" + #: This filter value will match nodes that are Up. + UP = "up" + #: This filter value will match nodes that are Down. + DOWN = "down" + #: This filter value will match nodes that are in the process of being enabled with status as + #: Enabling. + ENABLING = "enabling" + #: This filter value will match nodes that are in the process of being disabled with status as + #: Disabling. + DISABLING = "disabling" + #: This filter value will match nodes that are Disabled. + DISABLED = "disabled" + #: This filter value will match nodes whose status is Unknown. A node would be in Unknown state if + #: Service Fabric does not have authoritative information about that node. This can happen if the + #: system learns about a node at runtime. + UNKNOWN = "unknown" + #: This filter value will match nodes whose status is Removed. These are the nodes that are + #: removed from the cluster using the RemoveNodeState API. + REMOVED = "removed" + +class NodeTransitionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Reserved. Do not pass into API. 
+ INVALID = "Invalid" + #: Transition a stopped node to up. + START = "Start" + #: Transition an up node to stopped. + STOP = "Stop" + +class NodeUpgradePhase(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The state of the upgrading node. + """ + + #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The upgrade has not started yet due to pending safety checks. The value is 1. + PRE_UPGRADE_SAFETY_CHECK = "PreUpgradeSafetyCheck" + #: The upgrade is in progress. The value is 2. + UPGRADING = "Upgrading" + #: The upgrade has completed and post upgrade safety checks are being performed. The value is 3. + POST_UPGRADE_SAFETY_CHECK = "PostUpgradeSafetyCheck" + +class OperatingSystemType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The operation system required by the code in service. + """ + + #: The required operating system is Linux. + LINUX = "Linux" + #: The required operating system is Windows. + WINDOWS = "Windows" + +class OperationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The state of the operation. + """ + + #: The operation state is invalid. + INVALID = "Invalid" + #: The operation is in progress. + RUNNING = "Running" + #: The operation is rolling back internal system state because it encountered a fatal error or was + #: cancelled by the user. "RollingBack" does not refer to user state. For example, if + #: CancelOperation is called on a command of type PartitionDataLoss, state of "RollingBack" does + #: not mean service data is being restored (assuming the command has progressed far enough to + #: cause data loss). It means the system is rolling back/cleaning up internal system state + #: associated with the command. + ROLLING_BACK = "RollingBack" + #: The operation has completed successfully and is no longer running. + COMPLETED = "Completed" + #: The operation has failed and is no longer running. 
+ FAULTED = "Faulted" + #: The operation was cancelled by the user using CancelOperation, and is no longer running. + CANCELLED = "Cancelled" + #: The operation was cancelled by the user using CancelOperation, with the force parameter set to + #: true. It is no longer running. Refer to CancelOperation for more details. + FORCE_CANCELLED = "ForceCancelled" + +class OperationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the operation. + """ + + #: The operation state is invalid. + INVALID = "Invalid" + #: An operation started using the StartDataLoss API. + PARTITION_DATA_LOSS = "PartitionDataLoss" + #: An operation started using the StartQuorumLoss API. + PARTITION_QUORUM_LOSS = "PartitionQuorumLoss" + #: An operation started using the StartPartitionRestart API. + PARTITION_RESTART = "PartitionRestart" + #: An operation started using the StartNodeTransition API. + NODE_TRANSITION = "NodeTransition" + +class Ordering(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Defines the order. + """ + + #: Descending sort order. + DESC = "Desc" + #: Ascending sort order. + ASC = "Asc" + +class PackageSharingPolicyScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Represents the scope for PackageSharingPolicy. This is specified during + DeployServicePackageToNode operation. + """ + + #: No package sharing policy scope. The value is 0. + NONE = "None" + #: Share all code, config and data packages from corresponding service manifest. The value is 1. + ALL = "All" + #: Share all code packages from corresponding service manifest. The value is 2. + CODE = "Code" + #: Share all config packages from corresponding service manifest. The value is 3. + CONFIG = "Config" + #: Share all data packages from corresponding service manifest. The value is 4. + DATA = "Data" + +class PartitionAccessStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the access status of the partition. 
+ """ + + #: Indicates that the read or write operation access status is not valid. This value is not + #: returned to the caller. + INVALID = "Invalid" + #: Indicates that the read or write operation access is granted and the operation is allowed. + GRANTED = "Granted" + #: Indicates that the client should try again later, because a reconfiguration is in progress. + RECONFIGURATION_PENDING = "ReconfigurationPending" + #: Indicates that this client request was received by a replica that is not a Primary replica. + NOT_PRIMARY = "NotPrimary" + #: Indicates that no write quorum is available and, therefore, no write operation can be accepted. + NO_WRITE_QUORUM = "NoWriteQuorum" + +class PartitionScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the ways that a service can be partitioned. + """ + + #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: Indicates that the partition is based on string names, and is a + #: SingletonPartitionSchemeDescription object, The value is 1. + SINGLETON = "Singleton" + #: Indicates that the partition is based on Int64 key ranges, and is a + #: UniformInt64RangePartitionSchemeDescription object. The value is 2. + UNIFORM_INT64_RANGE = "UniformInt64Range" + #: Indicates that the partition is based on string names, and is a NamedPartitionSchemeDescription + #: object. The value is 3. + NAMED = "Named" + +class PathMatchType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """how to match value in the Uri + """ + + PREFIX = "prefix" + +class PropertyBatchInfoKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of property batch info, determined by the results of a property batch. The following + are the possible values. + """ + + #: Indicates the property batch info is invalid. All Service Fabric enumerations have the invalid + #: type. 
+ INVALID = "Invalid" + #: The property batch succeeded. + SUCCESSFUL = "Successful" + #: The property batch failed. + FAILED = "Failed" + +class PropertyBatchOperationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of property batch operation, determined by the operation to be performed. The + following are the possible values. + """ + + #: Indicates the property operation is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: The operation will create or edit a property. The value is 1. + PUT = "Put" + #: The operation will get a property. The value is 2. + GET = "Get" + #: The operation will check that a property exists or doesn't exists, depending on the provided + #: value. The value is 3. + CHECK_EXISTS = "CheckExists" + #: The operation will ensure that the sequence number is equal to the provided value. The value is + #: 4. + CHECK_SEQUENCE = "CheckSequence" + #: The operation will delete a property. The value is 5. + DELETE = "Delete" + #: The operation will ensure that the value of a property is equal to the provided value. The + #: value is 7. + CHECK_VALUE = "CheckValue" + +class PropertyValueKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of property, determined by the type of data. Following are the possible values. + """ + + #: Indicates the property is invalid. All Service Fabric enumerations have the invalid type. The + #: value is zero. + INVALID = "Invalid" + #: The data inside the property is a binary blob. The value is 1. + BINARY = "Binary" + #: The data inside the property is an int64. The value is 2. + INT64 = "Int64" + #: The data inside the property is a double. The value is 3. + DOUBLE = "Double" + #: The data inside the property is a string. The value is 4. + STRING = "String" + #: The data inside the property is a guid. The value is 5. 
+ GUID = "Guid" + +class ProvisionApplicationTypeKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of application type registration or provision requested. The application package can + be registered or provisioned either from the image store or from an external store. Following + are the kinds of the application type provision. + """ + + #: Indicates that the provision kind is invalid. This value is default and should not be used. The + #: value is zero. + INVALID = "Invalid" + #: Indicates that the provision is for a package that was previously uploaded to the image store. + #: The value is 1. + IMAGE_STORE_PATH = "ImageStorePath" + #: Indicates that the provision is for an application package that was previously uploaded to an + #: external store. The application package ends with the extension *.sfpkg. The value is 2. + EXTERNAL_STORE = "ExternalStore" + +class QuorumLossMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Reserved. Do not pass into API. + INVALID = "Invalid" + #: Partial Quorum loss mode : Minimum number of replicas for a partition will be down that will + #: cause a quorum loss. + QUORUM_REPLICAS = "QuorumReplicas" + ALL_REPLICAS = "AllReplicas" + +class ReconfigurationPhase(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The reconfiguration phase of a replica of a stateful service. + """ + + #: Indicates the invalid reconfiguration phase. + UNKNOWN = "Unknown" + #: Specifies that there is no reconfiguration in progress. + NONE = "None" + #: Refers to the phase where the reconfiguration is transferring data from the previous primary to + #: the new primary. + PHASE0 = "Phase0" + #: Refers to the phase where the reconfiguration is querying the replica set for the progress. + PHASE1 = "Phase1" + #: Refers to the phase where the reconfiguration is ensuring that data from the current primary is + #: present in a majority of the replica set. 
+ PHASE2 = "Phase2" + #: This phase is for internal use only. + PHASE3 = "Phase3" + #: This phase is for internal use only. + PHASE4 = "Phase4" + #: This phase is for internal use only. + ABORT_PHASE_ZERO = "AbortPhaseZero" + +class ReconfigurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of reconfiguration for replica of a stateful service. + """ + + #: Indicates the invalid reconfiguration type. + UNKNOWN = "Unknown" + #: Specifies that the primary replica is being swapped with a different replica. + SWAP_PRIMARY = "SwapPrimary" + #: Reconfiguration triggered in response to a primary going down. This could be due to many + #: reasons such as primary replica crashing etc. + FAILOVER = "Failover" + #: Reconfigurations where the primary replica is not changing. + OTHER = "Other" + +class RepairImpactKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the kind of the impact. This type supports the Service Fabric platform; it is not + meant to be used directly from your code.' + """ + + #: The repair impact is not valid or is of an unknown type. + INVALID = "Invalid" + #: The repair impact affects a set of Service Fabric nodes. + NODE = "Node" + +class RepairTargetKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the kind of the repair target. This type supports the Service Fabric platform; it is + not meant to be used directly from your code.' + """ + + #: The repair target is not valid or is of an unknown type. + INVALID = "Invalid" + #: The repair target is a set of Service Fabric nodes. + NODE = "Node" + +class RepairTaskHealthCheckState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the workflow state of a repair task's health check. This type supports the Service + Fabric platform; it is not meant to be used directly from your code. + """ + + #: Indicates that the health check has not started. 
+ NOT_STARTED = "NotStarted" + #: Indicates that the health check is in progress. + IN_PROGRESS = "InProgress" + #: Indicates that the health check succeeded. + SUCCEEDED = "Succeeded" + #: Indicates that the health check was skipped. + SKIPPED = "Skipped" + #: Indicates that the health check timed out. + TIMED_OUT = "TimedOut" + +class ReplicaHealthReportServiceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Does not use Service Fabric to make its state highly available or reliable. The value is 1. + STATELESS = "Stateless" + #: Uses Service Fabric to make its state or part of its state highly available and reliable. The + #: value is 2. + STATEFUL = "Stateful" + +class ReplicaKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The role of a replica of a stateful service. + """ + + #: Represents an invalid replica kind. The value is zero. + INVALID = "Invalid" + #: Represents a key value store replica. The value is 1. + KEY_VALUE_STORE = "KeyValueStore" + +class ReplicaRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The role of a replica of a stateful service. + """ + + #: Indicates the initial role that a replica is created in. The value is zero. + UNKNOWN = "Unknown" + #: Specifies that the replica has no responsibility in regard to the replica set. The value is 1. + NONE = "None" + #: Refers to the replica in the set on which all read and write operations are complete in order + #: to enforce strong consistency semantics. Read operations are handled directly by the Primary + #: replica, while write operations must be acknowledged by a quorum of the replicas in the replica + #: set. There can only be one Primary replica in a replica set at a time. The value is 2. + PRIMARY = "Primary" + #: Refers to a replica in the set that receives a state transfer from the Primary replica to + #: prepare for becoming an active Secondary replica. There can be multiple Idle Secondary replicas + #: in a replica set at a time. 
Idle Secondary replicas do not count as a part of a write quorum. + #: The value is 3. + IDLE_SECONDARY = "IdleSecondary" + #: Refers to a replica in the set that receives state updates from the Primary replica, applies + #: them, and sends acknowledgements back. Secondary replicas must participate in the write quorum + #: for a replica set. There can be multiple active Secondary replicas in a replica set at a time. + #: The number of active Secondary replicas is configurable that the reliability subsystem should + #: maintain. The value is 4. + ACTIVE_SECONDARY = "ActiveSecondary" + +class ReplicaStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of a replica of a service. + """ + + #: Indicates the replica status is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The replica is being built. This means that a primary replica is seeding this replica. The + #: value is 1. + IN_BUILD = "InBuild" + #: The replica is in standby. The value is 2. + STANDBY = "Standby" + #: The replica is ready. The value is 3. + READY = "Ready" + #: The replica is down. The value is 4. + DOWN = "Down" + #: Replica is dropped. This means that the replica has been removed from the replica set. If it is + #: persisted, its state has been deleted. The value is 5. + DROPPED = "Dropped" + +class ReplicatorOperationName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the operation currently being executed by the Replicator. + """ + + #: Default value if the replicator is not yet ready. + INVALID = "Invalid" + #: Replicator is not running any operation from Service Fabric perspective. + NONE = "None" + #: Replicator is opening. + OPEN = "Open" + #: Replicator is in the process of changing its role. + CHANGE_ROLE = "ChangeRole" + #: Due to a change in the replica set, replicator is being updated with its Epoch. + UPDATE_EPOCH = "UpdateEpoch" + #: Replicator is closing. 
+ CLOSE = "Close" + #: Replicator is being aborted. + ABORT = "Abort" + #: Replicator is handling the data loss condition, where the user service may potentially be + #: recovering state from an external source. + ON_DATA_LOSS = "OnDataLoss" + #: Replicator is waiting for a quorum of replicas to be caught up to the latest state. + WAIT_FOR_CATCHUP = "WaitForCatchup" + #: Replicator is in the process of building one or more replicas. + BUILD = "Build" + +class ResourceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Status of the resource. + """ + + #: Indicates the resource status is unknown. The value is zero. + UNKNOWN = "Unknown" + #: Indicates the resource is ready. The value is 1. + READY = "Ready" + #: Indicates the resource is upgrading. The value is 2. + UPGRADING = "Upgrading" + #: Indicates the resource is being created. The value is 3. + CREATING = "Creating" + #: Indicates the resource is being deleted. The value is 4. + DELETING = "Deleting" + #: Indicates the resource is not functional due to persistent failures. See statusDetails property + #: for more details. The value is 5. + FAILED = "Failed" + +class RestartPartitionMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Reserved. Do not pass into API. + INVALID = "Invalid" + #: All replicas or instances in the partition are restarted at once. + ALL_REPLICAS_OR_INSTANCES = "AllReplicasOrInstances" + #: Only the secondary replicas are restarted. + ONLY_ACTIVE_SECONDARIES = "OnlyActiveSecondaries" + +class RestartPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the restart policy for RunToCompletionExecutionPolicy + """ + + #: Service will be restarted when it encounters a failure. + ON_FAILURE = "OnFailure" + #: Service will never be restarted. If the service encounters a failure, it will move to Failed + #: state. 
+ NEVER = "Never" + +class RestoreState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Represents the current state of the partition restore operation. + """ + + #: Indicates an invalid restore state. All Service Fabric enumerations have the invalid type. + INVALID = "Invalid" + #: Operation has been validated and accepted. Restore is yet to be triggered. + ACCEPTED = "Accepted" + #: Restore operation has been triggered and is under process. + RESTORE_IN_PROGRESS = "RestoreInProgress" + #: Operation completed with success. + SUCCESS = "Success" + #: Operation completed with failure. + FAILURE = "Failure" + #: Operation timed out. + TIMEOUT = "Timeout" + +class ResultStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """A value describing the overall result of the repair task execution. Must be specified in the + Restoring and later states, and is immutable once set. + """ + + #: Indicates that the repair task result is invalid. All Service Fabric enumerations have the + #: invalid value. + INVALID = "Invalid" + #: Indicates that the repair task completed execution successfully. + SUCCEEDED = "Succeeded" + #: Indicates that the repair task was cancelled prior to execution. + CANCELLED = "Cancelled" + #: Indicates that execution of the repair task was interrupted by a cancellation request after + #: some work had already been performed. + INTERRUPTED = "Interrupted" + #: Indicates that there was a failure during execution of the repair task. Some work may have been + #: performed. + FAILED = "Failed" + #: Indicates that the repair task result is not yet available, because the repair task has not + #: finished executing. + PENDING = "Pending" + +class RetentionPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of retention policy. Currently only "Basic" retention policy is supported. + """ + + #: Indicates a basic retention policy type. + BASIC = "Basic" + #: Indicates an invalid retention policy type. 
+ INVALID = "Invalid" + +class RollingUpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The mode used to monitor health during a rolling upgrade. The values are UnmonitoredAuto, + UnmonitoredManual, and Monitored. + """ + + #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The upgrade will proceed automatically without performing any health monitoring. The value is + #: 1. + UNMONITORED_AUTO = "UnmonitoredAuto" + #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually + #: monitor health before proceeding. The value is 2. + UNMONITORED_MANUAL = "UnmonitoredManual" + #: The upgrade will stop after completing each upgrade domain and automatically monitor health + #: before proceeding. The value is 3. + MONITORED = "Monitored" + +class SafetyCheckKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of safety check performed by service fabric before continuing with the operations. + These checks ensure the availability of the service and the reliability of the state. Following + are the kinds of safety checks. + """ + + #: Indicates that the upgrade safety check kind is invalid. All Service Fabric enumerations have + #: the invalid type. The value is zero. + INVALID = "Invalid" + #: Indicates that if we bring down the node then this will result in global seed node quorum loss. + #: The value is 1. + ENSURE_SEED_NODE_QUORUM = "EnsureSeedNodeQuorum" + #: Indicates that there is some partition for which if we bring down the replica on the node, it + #: will result in quorum loss for that partition. The value is 2. + ENSURE_PARTITION_QUORUM = "EnsurePartitionQuorum" + #: Indicates that there is some replica on the node that was moved out of this node due to + #: upgrade. Service Fabric is now waiting for the primary to be moved back to this node. The value + #: is 3. 
+ WAIT_FOR_PRIMARY_PLACEMENT = "WaitForPrimaryPlacement" + #: Indicates that Service Fabric is waiting for a primary replica to be moved out of the node + #: before starting upgrade on that node. The value is 4. + WAIT_FOR_PRIMARY_SWAP = "WaitForPrimarySwap" + #: Indicates that there is some replica on the node that is involved in a reconfiguration. Service + #: Fabric is waiting for the reconfiguration to be complete before staring upgrade on that node. + #: The value is 5. + WAIT_FOR_RECONFIGURATION = "WaitForReconfiguration" + #: Indicates that there is either a replica on the node that is going through copy, or there is a + #: primary replica on the node that is copying data to some other replica. In both cases, bringing + #: down the replica on the node due to upgrade will abort the copy. The value is 6. + WAIT_FOR_INBUILD_REPLICA = "WaitForInbuildReplica" + #: Indicates that there is either a stateless service partition on the node having exactly one + #: instance, or there is a primary replica on the node for which the partition is quorum loss. In + #: both cases, bringing down the replicas due to upgrade will result in loss of availability. The + #: value is 7. + ENSURE_AVAILABILITY = "EnsureAvailability" + +class ScalingMechanismKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the ways that a service can be scaled. + """ + + #: Indicates the scaling mechanism is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates a mechanism for scaling where new instances are added or removed from a partition. + #: The value is 1. + PARTITION_INSTANCE_COUNT = "PartitionInstanceCount" + #: Indicates a mechanism for scaling where new named partitions are added or removed from a + #: service. The value is 2. 
+ ADD_REMOVE_INCREMENTAL_NAMED_PARTITION = "AddRemoveIncrementalNamedPartition" + +class ScalingTriggerKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumerates the ways that a service can be scaled. + """ + + #: Indicates the scaling trigger is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates a trigger where scaling decisions are made based on average load of a partition. The + #: value is 1. + AVERAGE_PARTITION_LOAD = "AveragePartitionLoad" + #: Indicates a trigger where scaling decisions are made based on average load of a service. The + #: value is 2. + AVERAGE_SERVICE_LOAD = "AverageServiceLoad" + +class Scheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Scheme for the http probe. Can be Http or Https. + """ + + #: Indicates that the probe is http. + HTTP = "http" + #: Indicates that the probe is https. No cert validation. + HTTPS = "https" + +class SecretKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the kind of secret. + """ + + #: A simple secret resource whose plaintext value is provided by the user. + INLINED_VALUE = "inlinedValue" + #: A secret resource that references a specific version of a secret stored in Azure Key Vault; the + #: expected value is a versioned KeyVault URI corresponding to the version of the secret being + #: referenced. + KEY_VAULT_VERSIONED_REFERENCE = "keyVaultVersionedReference" + +class ServiceCorrelationScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The service correlation scheme. + """ + + #: An invalid correlation scheme. Cannot be used. The value is zero. + INVALID = "Invalid" + #: Indicates that this service has an affinity relationship with another service. Provided for + #: backwards compatibility, consider preferring the Aligned or NonAlignedAffinity options. The + #: value is 1. 
+ AFFINITY = "Affinity" + #: Aligned affinity ensures that the primaries of the partitions of the affinitized services are + #: collocated on the same nodes. This is the default and is the same as selecting the Affinity + #: scheme. The value is 2. + ALIGNED_AFFINITY = "AlignedAffinity" + #: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same + #: nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will + #: be collocated. The value is 3. + NON_ALIGNED_AFFINITY = "NonAlignedAffinity" + +class ServiceEndpointRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The role of the replica where the endpoint is reported. + """ + + #: Indicates the service endpoint role is invalid. All Service Fabric enumerations have the + #: invalid type. The value is zero. + INVALID = "Invalid" + #: Indicates that the service endpoint is of a stateless service. The value is 1. + STATELESS = "Stateless" + #: Indicates that the service endpoint is of a primary replica of a stateful service. The value is + #: 2. + STATEFUL_PRIMARY = "StatefulPrimary" + #: Indicates that the service endpoint is of a secondary replica of a stateful service. The value + #: is 3. + STATEFUL_SECONDARY = "StatefulSecondary" + +class ServiceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of service (Stateless or Stateful). + """ + + #: Indicates the service kind is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: Does not use Service Fabric to make its state highly available or reliable. The value is 1. + STATELESS = "Stateless" + #: Uses Service Fabric to make its state or part of its state highly available and reliable. The + #: value is 2. 
+ STATEFUL = "Stateful" + +class ServiceLoadMetricWeight(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Determines the metric weight relative to the other metrics that are configured for this + service. During runtime, if two metrics end up in conflict, the Cluster Resource Manager + prefers the metric with the higher weight. + """ + + #: Disables resource balancing for this metric. This value is zero. + ZERO = "Zero" + #: Specifies the metric weight of the service load as Low. The value is 1. + LOW = "Low" + #: Specifies the metric weight of the service load as Medium. The value is 2. + MEDIUM = "Medium" + #: Specifies the metric weight of the service load as High. The value is 3. + HIGH = "High" + +class ServiceOperationName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Specifies the current active life-cycle operation on a stateful service replica or stateless + service instance. + """ + + #: Reserved for future use. + UNKNOWN = "Unknown" + #: The service replica or instance is not going through any life-cycle changes. + NONE = "None" + #: The service replica or instance is being opened. + OPEN = "Open" + #: The service replica is changing roles. + CHANGE_ROLE = "ChangeRole" + #: The service replica or instance is being closed. + CLOSE = "Close" + #: The service replica or instance is being aborted. + ABORT = "Abort" + +class ServicePackageActivationMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The activation mode of service package to be used for a Service Fabric service. This is + specified at the time of creating the Service. + """ + + #: This is the default activation mode. With this activation mode, replicas or instances from + #: different partition(s) of service, on a given node, will share same activation of service + #: package on a node. The value is zero. 
+ SHARED_PROCESS = "SharedProcess" + #: With this activation mode, each replica or instance of service, on a given node, will have its + #: own dedicated activation of service package on a node. The value is 1. + EXCLUSIVE_PROCESS = "ExclusiveProcess" + +class ServicePartitionKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of partitioning scheme used to partition the service. + """ + + #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: Indicates that there is only one partition, and SingletonPartitionSchemeDescription was + #: specified while creating the service. The value is 1. + SINGLETON = "Singleton" + #: Indicates that the partition is based on Int64 key ranges, and + #: UniformInt64RangePartitionSchemeDescription was specified while creating the service. The value + #: is 2. + INT64_RANGE = "Int64Range" + #: Indicates that the partition is based on string names, and NamedPartitionInformation was + #: specified while creating the service. The value is 3. + NAMED = "Named" + +class ServicePartitionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the service fabric service partition. + """ + + #: Indicates the partition status is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates that the partition is ready. This means that for a stateless service partition there + #: is at least one instance that is up and for a stateful service partition the number of ready + #: replicas is greater than or equal to the MinReplicaSetSize. The value is 1. + READY = "Ready" + #: Indicates that the partition is not ready. This status is returned when none of the other + #: states apply. The value is 2. + NOT_READY = "NotReady" + #: Indicates that the partition is in quorum loss. 
This means that number of replicas that are up + #: and participating in a replica set is less than MinReplicaSetSize for this partition. The value + #: is 3. + IN_QUORUM_LOSS = "InQuorumLoss" + #: Indicates that the partition is undergoing reconfiguration of its replica sets. This can happen + #: due to failover, upgrade, load balancing or addition or removal of replicas from the replica + #: set. The value is 4. + RECONFIGURING = "Reconfiguring" + #: Indicates that the partition is being deleted. The value is 5. + DELETING = "Deleting" + +class ServicePlacementPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of placement policy for a service fabric service. Following are the possible values. + """ + + #: Indicates the type of the placement policy is invalid. All Service Fabric enumerations have the + #: invalid type. The value is zero. + INVALID = "Invalid" + #: Indicates that the ServicePlacementPolicyDescription is of type + #: ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or + #: upgrade domain cannot be used for placement of this service. The value is 1. + INVALID_DOMAIN = "InvalidDomain" + #: Indicates that the ServicePlacementPolicyDescription is of type + #: ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the + #: service must be placed in a specific domain. The value is 2. + REQUIRE_DOMAIN = "RequireDomain" + #: Indicates that the ServicePlacementPolicyDescription is of type + #: ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the + #: Primary replica for the partitions of the service should be located in a particular domain as + #: an optimization. The value is 3. 
+    PREFER_PRIMARY_DOMAIN = "PreferPrimaryDomain"
+    #: Indicates that the ServicePlacementPolicyDescription is of type
+    #: ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will
+    #: disallow placement of any two replicas from the same partition in the same domain at any time.
+    #: The value is 4.
+    REQUIRE_DOMAIN_DISTRIBUTION = "RequireDomainDistribution"
+    #: Indicates that the ServicePlacementPolicyDescription is of type
+    #: ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all
+    #: replicas of a particular partition of the service should be placed atomically. The value is 5.
+    NON_PARTIALLY_PLACE_SERVICE = "NonPartiallyPlaceService"
+    #: Indicates that the ServicePlacementPolicyDescription is of type
+    #: ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, which indicates that
+    #: multiple stateless instances of a particular partition of the service can be placed on a node.
+    #: The value is 6.
+    ALLOW_MULTIPLE_STATELESS_INSTANCES_ON_NODE = "AllowMultipleStatelessInstancesOnNode"
+
+class ServiceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+    """The status of the service.
+    """
+
+    #: Indicates the service status is unknown. The value is zero.
+    UNKNOWN = "Unknown"
+    #: Indicates the service status is active. The value is 1.
+    ACTIVE = "Active"
+    #: Indicates the service is upgrading. The value is 2.
+    UPGRADING = "Upgrading"
+    #: Indicates the service is being deleted. The value is 3.
+    DELETING = "Deleting"
+    #: Indicates the service is being created. The value is 4.
+    CREATING = "Creating"
+    #: Indicates creation or deletion was terminated due to persistent failures. Another create/delete
+    #: request can be accepted. The value is 5.
+    FAILED = "Failed"
+
+class ServiceTypeRegistrationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+    """The status of the service type registration on the node.
+ """ + + #: Indicates the registration status is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: Indicates that the service type is disabled on this node. A type gets disabled when there are + #: too many failures of the code package hosting the service type. If the service type is + #: disabled, new replicas of that service type will not be placed on the node until it is enabled + #: again. The service type is enabled again after the process hosting it comes up and re-registers + #: the type or a preconfigured time interval has passed. The value is 1. + DISABLED = "Disabled" + #: Indicates that the service type is enabled on this node. Replicas of this service type can be + #: placed on this node when the code package registers the service type. The value is 2. + ENABLED = "Enabled" + #: Indicates that the service type is enabled and registered on the node by a code package. + #: Replicas of this service type can now be placed on this node. The value is 3. + REGISTERED = "Registered" + +class SettingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the setting being given in value + """ + + #: The setting in clear text, will not be processed in any way and passed in as is. + CLEAR_TEXT = "ClearText" + #: The URI to a KeyVault secret version, will be resolved using the application's managed identity + #: (this type is only valid if the app was assigned a managed identity) before getting passed in. + KEY_VAULT_REFERENCE = "KeyVaultReference" + #: The reference to a SecretValue resource, will be resolved before getting passed in. + SECRET_VALUE_REFERENCE = "SecretValueReference" + +class SizeTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Volume size + """ + + SMALL = "Small" + MEDIUM = "Medium" + LARGE = "Large" + +class State(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The workflow state of the repair task. 
Valid initial states are Created, Claimed, and + Preparing. + """ + + #: Indicates that the repair task state is invalid. All Service Fabric enumerations have the + #: invalid value. + INVALID = "Invalid" + #: Indicates that the repair task has been created. + CREATED = "Created" + #: Indicates that the repair task has been claimed by a repair executor. + CLAIMED = "Claimed" + #: Indicates that the Repair Manager is preparing the system to handle the impact of the repair + #: task, usually by taking resources offline gracefully. + PREPARING = "Preparing" + #: Indicates that the repair task has been approved by the Repair Manager and is safe to execute. + APPROVED = "Approved" + #: Indicates that execution of the repair task is in progress. + EXECUTING = "Executing" + #: Indicates that the Repair Manager is restoring the system to its pre-repair state, usually by + #: bringing resources back online. + RESTORING = "Restoring" + #: Indicates that the repair task has completed, and no further state changes will occur. + COMPLETED = "Completed" + +class UpgradeDomainState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The state of the upgrade domain. + """ + + #: Indicates the upgrade domain state is invalid. All Service Fabric enumerations have the invalid + #: type. The value is zero. + INVALID = "Invalid" + #: The upgrade domain has not started upgrading yet. The value is 1. + PENDING = "Pending" + #: The upgrade domain is being upgraded but not complete yet. The value is 2. + IN_PROGRESS = "InProgress" + #: The upgrade domain has completed upgrade. The value is 3. + COMPLETED = "Completed" + +class UpgradeKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The kind of upgrade out of the following possible values. + """ + + #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The upgrade progresses one upgrade domain at a time. The value is 1. 
+ ROLLING = "Rolling" + +class UpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The mode used to monitor health during a rolling upgrade. The values are UnmonitoredAuto, + UnmonitoredManual, and Monitored. + """ + + #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. + #: The value is zero. + INVALID = "Invalid" + #: The upgrade will proceed automatically without performing any health monitoring. The value is + #: 1. + UNMONITORED_AUTO = "UnmonitoredAuto" + #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually + #: monitor health before proceeding. The value is 2. + UNMONITORED_MANUAL = "UnmonitoredManual" + #: The upgrade will stop after completing each upgrade domain and automatically monitor health + #: before proceeding. The value is 3. + MONITORED = "Monitored" + +class UpgradeSortOrder(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Defines the order in which an upgrade proceeds through the cluster. + """ + + #: Indicates that this sort order is not valid. All Service Fabric enumerations have the invalid + #: type. The value is 0. + INVALID = "Invalid" + #: Indicates that the default sort order (as specified in cluster manifest) will be used. The + #: value is 1. + DEFAULT = "Default" + #: Indicates that forward numeric sort order (UD names sorted as numbers) will be used. The value + #: is 2. + NUMERIC = "Numeric" + #: Indicates that forward lexicographical sort order (UD names sorted as strings) will be used. + #: The value is 3. + LEXICOGRAPHICAL = "Lexicographical" + #: Indicates that reverse numeric sort order (UD names sorted as numbers) will be used. The value + #: is 4. + REVERSE_NUMERIC = "ReverseNumeric" + #: Indicates that reverse lexicographical sort order (UD names sorted as strings) will be used. + #: The value is 5. 
+    REVERSE_LEXICOGRAPHICAL = "ReverseLexicographical"
+
+class UpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+    """The state of the upgrade.
+    """
+
+    #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type.
+    #: The value is zero.
+    INVALID = "Invalid"
+    #: The upgrade is rolling back to the previous version but is not complete yet. The value is 1.
+    ROLLING_BACK_IN_PROGRESS = "RollingBackInProgress"
+    #: The upgrade has finished rolling back. The value is 2.
+    ROLLING_BACK_COMPLETED = "RollingBackCompleted"
+    #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an
+    #: explicit move next request in UnmonitoredManual mode or performing health checks in Monitored
+    #: mode. The value is 3.
+    ROLLING_FORWARD_PENDING = "RollingForwardPending"
+    #: The upgrade is rolling forward to the target version but is not complete yet. The value is 4.
+    ROLLING_FORWARD_IN_PROGRESS = "RollingForwardInProgress"
+    #: The upgrade has finished rolling forward. The value is 5.
+    ROLLING_FORWARD_COMPLETED = "RollingForwardCompleted"
+    #: The upgrade has failed and is unable to execute FailureAction. The value is 6.
+    FAILED = "Failed"
+
+class UpgradeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+    """The type of upgrade out of the following possible values.
+    """
+
+    #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type.
+    #: The value is zero.
+    INVALID = "Invalid"
+    #: The upgrade progresses one upgrade domain at a time. The value is 1.
+    ROLLING = "Rolling"
+    #: The upgrade gets restarted by force. The value is 2.
+    ROLLING_FORCE_RESTART = "Rolling_ForceRestart"
+
+class VolumeProvider(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+    """Describes the provider of the volume resource.
+    """
+
+    #: Provides volumes that are backed by Azure Files.
+ SF_AZURE_FILE = "SFAzureFile" diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py index 2d275dce36ff..df6b66c53161 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py @@ -1,14 +1,12 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from ._service_fabric_client_apis_operations import ServiceFabricClientAPIsOperationsMixin from ._mesh_secret_operations import MeshSecretOperations from ._mesh_secret_value_operations import MeshSecretValueOperations from ._mesh_volume_operations import MeshVolumeOperations @@ -18,9 +16,9 @@ from ._mesh_code_package_operations import MeshCodePackageOperations from ._mesh_service_replica_operations import MeshServiceReplicaOperations from ._mesh_gateway_operations import MeshGatewayOperations -from ._service_fabric_client_ap_is_operations import ServiceFabricClientAPIsOperationsMixin __all__ = [ + 'ServiceFabricClientAPIsOperationsMixin', 'MeshSecretOperations', 'MeshSecretValueOperations', 'MeshVolumeOperations', @@ -30,5 +28,4 @@ 'MeshCodePackageOperations', 'MeshServiceReplicaOperations', 'MeshGatewayOperations', - 'ServiceFabricClientAPIsOperationsMixin', ] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py index 9982364b8c00..5f6723d52b0a 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py @@ -1,327 +1,338 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshApplicationOperations(object): """MeshApplicationOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config + self._config = config def create_or_update( - self, application_resource_name, application_resource_description, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + application_resource_description, # type: "_models.ApplicationResourceDescription" + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.ApplicationResourceDescription"] """Creates or updates a Application resource. - Creates a Application resource with the specified name, description and - properties. If Application resource with the same name exists, then it - is updated with the specified description and properties. + Creates a Application resource with the specified name, description and properties. If + Application resource with the same name exists, then it is updated with the specified + description and properties. :param application_resource_name: The identity of the application. :type application_resource_name: str - :param application_resource_description: Description for creating a - Application resource. - :type application_resource_description: - ~azure.servicefabric.models.ApplicationResourceDescription - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ApplicationResourceDescription or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :param application_resource_description: Description for creating a Application resource. 
+ :type application_resource_description: ~azure.servicefabric.models.ApplicationResourceDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError """ - api_version = "6.4-preview" + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body + body_content_kwargs = {} # type: Dict[str, Any] body_content = 
self._serialize.body(application_resource_description, 'ApplicationResourceDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 201, 202]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('ApplicationResourceDescription', response) + deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) + if response.status_code == 201: - deserialized = self._deserialize('ApplicationResourceDescription', response) + deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} + create_or_update.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore def get( - self, application_resource_name, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.ApplicationResourceDescription" """Gets the Application resource with the given name. 
- Gets the information about the Application resource with the given - name. The information include the description and other properties of - the Application. + Gets the information about the Application resource with the given name. The information + include the description and other properties of the Application. :param application_resource_name: The identity of the application. :type application_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ApplicationResourceDescription or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ - api_version = "6.4-preview" + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + 
query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationResourceDescription', response) + deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore def delete( - self, application_resource_name, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> None """Deletes the Application resource. Deletes the Application resource identified by the name. :param application_resource_name: The identity of the application. 
:type application_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError """ - api_version = "6.4-preview" + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} + delete.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore def list( - self, custom_headers=None, raw=False, **operation_config): + self, + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedApplicationResourceDescriptionList" """Lists all the application resources. - Gets the information about all application resources in a given - resource group. The information include the description and other - properties of the Application. - - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedApplicationResourceDescriptionList or ClientRawResponse - if raw=true - :rtype: - ~azure.servicefabric.models.PagedApplicationResourceDescriptionList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + Gets the information about all application resources in a given resource group. The information + include the description and other properties of the Application. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedApplicationResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedApplicationResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError """ - api_version = "6.4-preview" + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedApplicationResourceDescriptionList', response) + 
deserialized = self._deserialize('PagedApplicationResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Applications'} + list.metadata = {'url': '/Resources/Applications'} # type: ignore def get_upgrade_progress( - self, application_resource_name, custom_headers=None, raw=False, **operation_config): - """Gets the progress of the latest upgrade performed on this application - resource. + self, + application_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.ApplicationResourceUpgradeProgressInfo" + """Gets the progress of the latest upgrade performed on this application resource. - Gets the upgrade progress information about the Application resource - with the given name. The information include percentage of completion - and other upgrade state information of the Application resource. + Gets the upgrade progress information about the Application resource with the given name. The + information include percentage of completion and other upgrade state information of the + Application resource. :param application_resource_name: The identity of the application. :type application_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ApplicationResourceUpgradeProgressInfo or ClientRawResponse - if raw=true - :rtype: - ~azure.servicefabric.models.ApplicationResourceUpgradeProgressInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationResourceUpgradeProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationResourceUpgradeProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError """ - api_version = "7.0" + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceUpgradeProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" # Construct URL - url = self.get_upgrade_progress.metadata['url'] + url = self.get_upgrade_progress.metadata['url'] # type: ignore path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, 
**operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationResourceUpgradeProgressInfo', response) + deserialized = self._deserialize('ApplicationResourceUpgradeProgressInfo', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get_upgrade_progress.metadata = {'url': '/Resources/Applications/{applicationResourceName}/$/GetUpgradeProgress'} + get_upgrade_progress.metadata = {'url': '/Resources/Applications/{applicationResourceName}/$/GetUpgradeProgress'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py index 40390ca54420..6ae7fd989ea0 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py @@ -1,48 +1,61 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. 
# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshCodePackageOperations(object): """MeshCodePackageOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". 
""" - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def get_container_logs( - self, application_resource_name, service_resource_name, replica_name, code_package_name, tail=None, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + service_resource_name, # type: str + replica_name, # type: str + code_package_name, # type: str + tail=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.ContainerLogs" """Gets the logs from the container. - Gets the logs for the container of the specified code package of the - service replica. + Gets the logs for the container of the specified code package of the service replica. :param application_resource_name: The identity of the application. :type application_resource_name: str @@ -52,56 +65,55 @@ def get_container_logs( :type replica_name: str :param code_package_name: The name of code package of the service. :type code_package_name: str - :param tail: Number of lines to show from the end of the logs. Default - is 100. 'all' to show the complete logs. + :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show + the complete logs. :type tail: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ContainerLogs or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ContainerLogs or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ContainerLogs, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ContainerLogs + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get_container_logs.metadata['url'] + url = self.get_container_logs.metadata['url'] # type: ignore path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), - 'codePackageName': self._serialize.url("code_package_name", code_package_name, 'str') + 'codePackageName': self._serialize.url("code_package_name", code_package_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if tail is not None: query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ContainerLogs', response) + deserialized = self._deserialize('ContainerLogs', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get_container_logs.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}/CodePackages/{codePackageName}/Logs'} + get_container_logs.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}/CodePackages/{codePackageName}/Logs'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py index 582ffc6910d6..3118dbad7a41 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py @@ -1,259 +1,279 @@ # coding=utf-8 
# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshGatewayOperations(object): """MeshGatewayOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
- :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def create_or_update( - self, gateway_resource_name, gateway_resource_description, custom_headers=None, raw=False, **operation_config): + self, + gateway_resource_name, # type: str + gateway_resource_description, # type: "_models.GatewayResourceDescription" + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.GatewayResourceDescription"] """Creates or updates a Gateway resource. - Creates a Gateway resource with the specified name, description and - properties. If Gateway resource with the same name exists, then it is - updated with the specified description and properties. Use Gateway - resource to provide public connectivity to application services. + Creates a Gateway resource with the specified name, description and properties. If Gateway + resource with the same name exists, then it is updated with the specified description and + properties. Use Gateway resource to provide public connectivity to application services. :param gateway_resource_name: The identity of the gateway. :type gateway_resource_name: str - :param gateway_resource_description: Description for creating a - Gateway resource. - :type gateway_resource_description: - ~azure.servicefabric.models.GatewayResourceDescription - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: GatewayResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.GatewayResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :param gateway_resource_description: Description for creating a Gateway resource. + :type gateway_resource_description: ~azure.servicefabric.models.GatewayResourceDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: GatewayResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.GatewayResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GatewayResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True) + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - 
header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body + body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(gateway_resource_description, 'GatewayResourceDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 201, 202]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('GatewayResourceDescription', response) + deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) + if response.status_code == 201: - deserialized = self._deserialize('GatewayResourceDescription', response) + deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} + create_or_update.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore 
def get( - self, gateway_resource_name, custom_headers=None, raw=False, **operation_config): + self, + gateway_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.GatewayResourceDescription" """Gets the Gateway resource with the given name. - Gets the information about the Gateway resource with the given name. - The information include the description and other properties of the - Gateway. + Gets the information about the Gateway resource with the given name. The information include + the description and other properties of the Gateway. :param gateway_resource_name: The identity of the gateway. :type gateway_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: GatewayResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.GatewayResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: GatewayResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.GatewayResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewayResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True) + 'gatewayResourceName': self._serialize.url("gateway_resource_name", 
gateway_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('GatewayResourceDescription', response) + deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} + get.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore def delete( - self, gateway_resource_name, custom_headers=None, raw=False, **operation_config): + self, + gateway_resource_name, 
# type: str + **kwargs # type: Any + ): + # type: (...) -> None """Deletes the Gateway resource. Deletes the Gateway resource identified by the name. :param gateway_resource_name: The identity of the gateway. :type gateway_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True) + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore def list( - self, custom_headers=None, raw=False, **operation_config): + self, + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedGatewayResourceDescriptionList" """Lists all the gateway resources. - Gets the information about all gateway resources in a given resource - group. The information include the description and other properties of - the Gateway. - - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedGatewayResourceDescriptionList or ClientRawResponse if - raw=true - :rtype: - ~azure.servicefabric.models.PagedGatewayResourceDescriptionList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + Gets the information about all gateway resources in a given resource group. The information + include the description and other properties of the Gateway. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedGatewayResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedGatewayResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedGatewayResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('PagedGatewayResourceDescriptionList', response) + deserialized = self._deserialize('PagedGatewayResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Gateways'} + list.metadata = {'url': '/Resources/Gateways'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py index d0eaba348d13..8cc6feb394db 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py @@ -1,262 +1,284 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. 
import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshNetworkOperations(object): """MeshNetworkOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def create_or_update( - self, network_resource_name, name, properties, custom_headers=None, raw=False, **operation_config): + self, + network_resource_name, # type: str + name, # type: str + properties, # type: "_models.NetworkResourceProperties" + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.NetworkResourceDescription"] """Creates or updates a Network resource. - Creates a Network resource with the specified name, description and - properties. If Network resource with the same name exists, then it is - updated with the specified description and properties. 
Network resource - provides connectivity between application services. + Creates a Network resource with the specified name, description and properties. If Network + resource with the same name exists, then it is updated with the specified description and + properties. Network resource provides connectivity between application services. :param network_resource_name: The identity of the network. :type network_resource_name: str :param name: Name of the Network resource. :type name: str :param properties: Describes properties of a network resource. - :type properties: - ~azure.servicefabric.models.NetworkResourceProperties - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: NetworkResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NetworkResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :type properties: ~azure.servicefabric.models.NetworkResourceProperties + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NetworkResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NetworkResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError """ - network_resource_description = models.NetworkResourceDescription(name=name, properties=properties) + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NetworkResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _network_resource_description = _models.NetworkResourceDescription(name=name, properties=properties) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = 
"application/json" # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True) + 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body - body_content = self._serialize.body(network_resource_description, 'NetworkResourceDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_network_resource_description, 'NetworkResourceDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 201, 202]: - raise 
models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('NetworkResourceDescription', response) + deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) + if response.status_code == 201: - deserialized = self._deserialize('NetworkResourceDescription', response) + deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/Resources/Networks/{networkResourceName}'} + create_or_update.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore def get( - self, network_resource_name, custom_headers=None, raw=False, **operation_config): + self, + network_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.NetworkResourceDescription" """Gets the Network resource with the given name. - Gets the information about the Network resource with the given name. - The information include the description and other properties of the - Network. + Gets the information about the Network resource with the given name. The information include + the description and other properties of the Network. :param network_resource_name: The identity of the network. :type network_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: NetworkResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NetworkResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NetworkResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NetworkResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True) + 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NetworkResourceDescription', response) + deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Networks/{networkResourceName}'} + get.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore def delete( - self, network_resource_name, custom_headers=None, raw=False, **operation_config): + self, + network_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> None """Deletes the Network resource. Deletes the Network resource identified by the name. :param network_resource_name: The identity of the network. :type network_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True) + 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: - raise models.FabricErrorException(self._deserialize, response) + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/Resources/Networks/{networkResourceName}'} + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore def list( - self, custom_headers=None, raw=False, **operation_config): + self, + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedNetworkResourceDescriptionList" """Lists all the network resources. - Gets the information about all network resources in a given resource - group. The information include the description and other properties of - the Network. - - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedNetworkResourceDescriptionList or ClientRawResponse if - raw=true - :rtype: - ~azure.servicefabric.models.PagedNetworkResourceDescriptionList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + Gets the information about all network resources in a given resource group. The information + include the description and other properties of the Network. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedNetworkResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedNetworkResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedNetworkResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('PagedNetworkResourceDescriptionList', response) + deserialized = self._deserialize('PagedNetworkResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Networks'} + list.metadata = {'url': '/Resources/Networks'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py index 0c980f03c04e..af905f8c9172 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py @@ -1,50 +1,61 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. 
import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshSecretOperations(object): """MeshSecretOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def create_or_update( - self, secret_resource_name, properties, name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + properties, # type: "_models.SecretResourceProperties" + name, # type: str + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.SecretResourceDescription"] """Creates or updates a Secret resource. - Creates a Secret resource with the specified name, description and - properties. If Secret resource with the same name exists, then it is - updated with the specified description and properties. 
Once created, - the kind and contentType of a secret resource cannot be updated. + Creates a Secret resource with the specified name, description and properties. If Secret + resource with the same name exists, then it is updated with the specified description and + properties. Once created, the kind and contentType of a secret resource cannot be updated. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str @@ -52,208 +63,222 @@ def create_or_update( :type properties: ~azure.servicefabric.models.SecretResourceProperties :param name: Name of the Secret resource. :type name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: SecretResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.SecretResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError """ - secret_resource_description = models.SecretResourceDescription(properties=properties, name=name) + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _secret_resource_description = _models.SecretResourceDescription(properties=properties, name=name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" # Construct URL - url = self.create_or_update.metadata['url'] + url = 
self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body - body_content = self._serialize.body(secret_resource_description, 'SecretResourceDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_secret_resource_description, 'SecretResourceDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 201, 202]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('SecretResourceDescription', response) + deserialized = self._deserialize('SecretResourceDescription', pipeline_response) + if response.status_code == 201: - deserialized = self._deserialize('SecretResourceDescription', response) + deserialized = self._deserialize('SecretResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} + create_or_update.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore def get( - self, secret_resource_name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.SecretResourceDescription" """Gets the Secret resource with the given name. - Gets the information about the Secret resource with the given name. The - information include the description and other properties of the Secret. + Gets the information about the Secret resource with the given name. The information include the + description and other properties of the Secret. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: SecretResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.SecretResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SecretResourceDescription', response) + deserialized = self._deserialize('SecretResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} + get.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore def delete( - self, secret_resource_name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> None """Deletes the Secret resource. Deletes the specified Secret resource and all of its named values. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: - raise models.FabricErrorException(self._deserialize, response) + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore def list( - self, custom_headers=None, raw=False, **operation_config): + self, + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedSecretResourceDescriptionList" """Lists all the secret resources. - Gets the information about all secret resources in a given resource - group. The information include the description and other properties of - the Secret. - - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedSecretResourceDescriptionList or ClientRawResponse if - raw=true + Gets the information about all secret resources in a given resource group. The information + include the description and other properties of the Secret. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedSecretResourceDescriptionList, or the result of cls(response) :rtype: ~azure.servicefabric.models.PagedSecretResourceDescriptionList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - 
deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedSecretResourceDescriptionList', response) + deserialized = self._deserialize('PagedSecretResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Secrets'} + list.metadata = {'url': '/Resources/Secrets'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py index a9202b1f9140..cb62b6c4e48c 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py @@ -1,341 +1,369 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshSecretValueOperations(object): """MeshSecretValueOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". 
""" - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def add_value( - self, secret_resource_name, secret_value_resource_name, name, value=None, custom_headers=None, raw=False, **operation_config): - """Adds the specified value as a new version of the specified secret - resource. - - Creates a new value of the specified secret resource. The name of the - value is typically the version identifier. Once created the value - cannot be changed. + self, + secret_resource_name, # type: str + secret_value_resource_name, # type: str + name, # type: str + value=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.SecretValueResourceDescription"] + """Adds the specified value as a new version of the specified secret resource. + + Creates a new value of the specified secret resource. The name of the value is typically the + version identifier. Once created the value cannot be changed. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource - value which is typically the version identifier for the value. + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. :type secret_value_resource_name: str :param name: Version identifier of the secret value. :type name: str :param value: The actual value of the secret. :type value: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: SecretValueResourceDescription or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretValueResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError """ - secret_value_resource_description = models.SecretValueResourceDescription(name=name, value=value) + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretValueResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _secret_value_resource_description = _models.SecretValueResourceDescription(name=name, value=value) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" # Construct URL - url = self.add_value.metadata['url'] + url = self.add_value.metadata['url'] # type: ignore path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - 
header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body - body_content = self._serialize.body(secret_value_resource_description, 'SecretValueResourceDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_secret_value_resource_description, 'SecretValueResourceDescription') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 201, 202]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('SecretValueResourceDescription', response) + deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) + if response.status_code == 201: - deserialized = self._deserialize('SecretValueResourceDescription', response) + deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) - if raw: - client_raw_response = 
ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - add_value.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} + add_value.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore def get( - self, secret_resource_name, secret_value_resource_name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + secret_value_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.SecretValueResourceDescription" """Gets the specified secret value resource. - Get the information about the specified named secret value resources. - The information does not include the actual value of the secret. + Get the information about the specified named secret value resources. The information does not + include the actual value of the secret. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource - value which is typically the version identifier for the value. + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. :type secret_value_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: SecretValueResourceDescription or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretValueResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretValueResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValueResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = 
self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SecretValueResourceDescription', response) + deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} + get.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore def delete( - self, secret_resource_name, secret_value_resource_name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + secret_value_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> None """Deletes the specified value of the named secret resource. - Deletes the secret value resource identified by the name. The name of - the resource is typically the version associated with that value. - Deletion will fail if the specified value is in use. + Deletes the secret value resource identified by the name. The name of the resource is typically + the version associated with that value. Deletion will fail if the specified value is in use. :param secret_resource_name: The name of the secret resource. 
:type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource - value which is typically the version identifier for the value. + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. :type secret_value_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = 
self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} + delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore def list( - self, secret_resource_name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedSecretValueResourceDescriptionList" """List names of all values of the specified secret resource. - Gets information about all secret value resources of the specified - secret resource. The information includes the names of the secret value - resources, but not the actual values. + Gets information about all secret value resources of the specified secret resource. The + information includes the names of the secret value resources, but not the actual values. 
:param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedSecretValueResourceDescriptionList or ClientRawResponse - if raw=true - :rtype: - ~azure.servicefabric.models.PagedSecretValueResourceDescriptionList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedSecretValueResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedSecretValueResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretValueResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if 
custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedSecretValueResourceDescriptionList', response) + deserialized = self._deserialize('PagedSecretValueResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values'} + list.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values'} # type: ignore def show( - self, secret_resource_name, secret_value_resource_name, custom_headers=None, raw=False, **operation_config): + self, + secret_resource_name, # type: str + secret_value_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.SecretValue" """Lists the specified value of the secret resource. - Lists the decrypted value of the specified named value of the secret - resource. This is a privileged operation. + Lists the decrypted value of the specified named value of the secret resource. This is a + privileged operation. 
:param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource - value which is typically the version identifier for the value. + :param secret_value_resource_name: The name of the secret resource value which is typically the + version identifier for the value. :type secret_value_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: SecretValue or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.SecretValue or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SecretValue, or the result of cls(response) + :rtype: ~azure.servicefabric.models.SecretValue + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValue"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.show.metadata['url'] + url = self.show.metadata['url'] # type: ignore path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SecretValue', response) + deserialized = self._deserialize('SecretValue', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - show.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}/list_value'} + show.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}/list_value'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py index 3f227dc25bf6..61b6e9f5c35f 100644 --- 
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py @@ -1,155 +1,164 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshServiceOperations(object): """MeshServiceOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. 
+ :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def get( - self, application_resource_name, service_resource_name, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + service_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceResourceDescription" """Gets the Service resource with the given name. - Gets the information about the Service resource with the given name. - The information include the description and other properties of the - Service. + Gets the information about the Service resource with the given name. The information include + the description and other properties of the Service. :param application_resource_name: The identity of the application. :type application_resource_name: str :param service_resource_name: The identity of the service. :type service_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ServiceResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True) + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, 
query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceResourceDescription', response) + deserialized = self._deserialize('ServiceResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}'} + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}'} # type: ignore def list( - self, application_resource_name, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedServiceResourceDescriptionList" """Lists all the service resources. - Gets the information about all services of an application resource. The - information include the description and other properties of the - Service. + Gets the information about all services of an application resource. The information include the + description and other properties of the Service. :param application_resource_name: The identity of the application. 
:type application_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedServiceResourceDescriptionList or ClientRawResponse if - raw=true - :rtype: - ~azure.servicefabric.models.PagedServiceResourceDescriptionList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServiceResourceDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServiceResourceDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - 
header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedServiceResourceDescriptionList', response) + deserialized = self._deserialize('PagedServiceResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services'} + list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py index 29e0a80e299f..b89dd2c29a1f 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py @@ -1,49 +1,60 @@ # coding=utf-8 # 
-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshServiceReplicaOperations(object): """MeshServiceReplicaOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. 
:param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def get( - self, application_resource_name, service_resource_name, replica_name, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + service_resource_name, # type: str + replica_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceReplicaDescription" """Gets the given replica of the service of an application. - Gets the information about the service replica with the given name. The - information include the description and other properties of the service - replica. + Gets the information about the service replica with the given name. The information include the + description and other properties of the service replica. :param application_resource_name: The identity of the application. :type application_resource_name: str @@ -51,109 +62,111 @@ def get( :type service_resource_name: str :param replica_name: Service Fabric replica name. :type replica_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ServiceReplicaDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceReplicaDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceReplicaDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceReplicaDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceReplicaDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), - 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True) + 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and 
send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceReplicaDescription', response) + deserialized = self._deserialize('ServiceReplicaDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}'} + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}'} # type: ignore def list( - self, application_resource_name, service_resource_name, custom_headers=None, raw=False, **operation_config): + self, + application_resource_name, # type: str + service_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedServiceReplicaDescriptionList" """Lists all the replicas of a service. - Gets the information about all replicas of a service. The information - include the description and other properties of the service replica. + Gets the information about all replicas of a service. The information include the description + and other properties of the service replica. :param application_resource_name: The identity of the application. 
:type application_resource_name: str :param service_resource_name: The identity of the service. :type service_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedServiceReplicaDescriptionList or ClientRawResponse if - raw=true + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServiceReplicaDescriptionList, or the result of cls(response) :rtype: ~azure.servicefabric.models.PagedServiceReplicaDescriptionList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceReplicaDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True) + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters 
= {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedServiceReplicaDescriptionList', response) + deserialized = self._deserialize('PagedServiceReplicaDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas'} + list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py index 7288b5ab1de8..a9ebacf79033 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py @@ 
-1,256 +1,279 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from msrest.pipeline import ClientRawResponse +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshVolumeOperations(object): """MeshVolumeOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. 
:param deserializer: An object model deserializer. - :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - - self.config = config - self.api_version = "6.4-preview" + self._config = config def create_or_update( - self, volume_resource_name, volume_resource_description, custom_headers=None, raw=False, **operation_config): + self, + volume_resource_name, # type: str + volume_resource_description, # type: "_models.VolumeResourceDescription" + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.VolumeResourceDescription"] """Creates or updates a Volume resource. - Creates a Volume resource with the specified name, description and - properties. If Volume resource with the same name exists, then it is - updated with the specified description and properties. + Creates a Volume resource with the specified name, description and properties. If Volume + resource with the same name exists, then it is updated with the specified description and + properties. :param volume_resource_name: The identity of the volume. :type volume_resource_name: str - :param volume_resource_description: Description for creating a Volume - resource. - :type volume_resource_description: - ~azure.servicefabric.models.VolumeResourceDescription - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: VolumeResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.VolumeResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :param volume_resource_description: Description for creating a Volume resource. + :type volume_resource_description: ~azure.servicefabric.models.VolumeResourceDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: VolumeResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.VolumeResourceDescription or None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VolumeResourceDescription"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True) + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - 
header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body + body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(volume_resource_description, 'VolumeResourceDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 201, 202]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('VolumeResourceDescription', response) + deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) + if response.status_code == 201: - deserialized = self._deserialize('VolumeResourceDescription', response) + deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} + create_or_update.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore def get( 
- self, volume_resource_name, custom_headers=None, raw=False, **operation_config): + self, + volume_resource_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "_models.VolumeResourceDescription" """Gets the Volume resource with the given name. - Gets the information about the Volume resource with the given name. The - information include the description and other properties of the Volume. + Gets the information about the Volume resource with the given name. The information include the + description and other properties of the Volume. :param volume_resource_name: The identity of the volume. :type volume_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: VolumeResourceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.VolumeResourceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: VolumeResourceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.VolumeResourceDescription + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.VolumeResourceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True) + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', 
skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('VolumeResourceDescription', response) + deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} + get.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore def delete( - self, volume_resource_name, custom_headers=None, raw=False, **operation_config): + self, + volume_resource_name, # type: str + **kwargs # type: Any + 
): + # type: (...) -> None """Deletes the Volume resource. Deletes the Volume resource identified by the name. :param volume_resource_name: The identity of the volume. :type volume_resource_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True) + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # 
Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore def list( - self, custom_headers=None, raw=False, **operation_config): + self, + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedVolumeResourceDescriptionList" """Lists all the volume resources. - Gets the information about all volume resources in a given resource - group. The information include the description and other properties of - the Volume. - - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedVolumeResourceDescriptionList or ClientRawResponse if - raw=true + Gets the information about all volume resources in a given resource group. The information + include the description and other properties of the Volume. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedVolumeResourceDescriptionList, or the result of cls(response) :rtype: ~azure.servicefabric.models.PagedVolumeResourceDescriptionList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedVolumeResourceDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) - 
deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedVolumeResourceDescriptionList', response) + deserialized = self._deserialize('PagedVolumeResourceDescriptionList', pipeline_response) - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response + if cls: + return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/Resources/Volumes'} + list.metadata = {'url': '/Resources/Volumes'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py deleted file mode 100644 index d94ced015347..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py +++ /dev/null @@ -1,16433 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.pipeline import ClientRawResponse -from .. import models - - -class ServiceFabricClientAPIsOperationsMixin(object): - - def get_cluster_manifest( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the Service Fabric cluster manifest. - - Get the Service Fabric cluster manifest. The cluster manifest contains - properties of the cluster that include different node types on the - cluster, - security configurations, fault, and upgrade domain topologies, etc. 
- These properties are specified as part of the ClusterConfig.JSON file - while deploying a stand-alone cluster. However, most of the information - in the cluster manifest - is generated internally by service fabric during cluster deployment in - other deployment scenarios (e.g. when using Azure portal). - The contents of the cluster manifest are for informational purposes - only and users are not expected to take a dependency on the format of - the file contents or its interpretation. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ClusterManifest or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterManifest or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_manifest.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise 
models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterManifest', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'} - - def get_cluster_health( - self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric cluster. - - Use EventsHealthStateFilter to filter the collection of health events - reported on the cluster based on the health state. - Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter - to filter the collection of nodes and applications returned based on - their aggregated health state. - - :param nodes_health_state_filter: Allows filtering of the node health - state objects returned in the result of cluster health query - based on their health state. The possible values for this parameter - include integer value of one of the - following health states. Only nodes that match the filter are - returned. All nodes are used to evaluate the aggregated health state. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of nodes - with HealthState value of OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. 
The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type nodes_health_state_filter: int - :param applications_health_state_filter: Allows filtering of the - application health state objects returned in the result of cluster - health - query based on their health state. - The possible values for this parameter include integer value obtained - from members or bitwise operations - on members of HealthStateFilter enumeration. Only applications that - match the filter are returned. - All applications are used to evaluate the aggregated health state. If - not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of - applications with HealthState value of OK (2) and Warning (4) are - returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type applications_health_state_filter: int - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. 
All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param include_system_application_health_statistics: Indicates whether - the health statistics should include the fabric:/System application - health statistics. False by default. - If IncludeSystemApplicationHealthStatistics is set to true, the health - statistics include the entities that belong to the fabric:/System - application. - Otherwise, the query result includes health statistics only for user - applications. - The health statistics must be included in the query result for this - parameter to be applied. - :type include_system_application_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ClusterHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_health.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if nodes_health_state_filter is not None: - query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') - if applications_health_state_filter is not None: - query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if include_system_application_health_statistics is not None: - query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # 
Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_health.metadata = {'url': '/$/GetClusterHealth'} - - def get_cluster_health_using_policy( - self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, application_health_policy_map=None, cluster_health_policy=None, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric cluster using the specified policy. - - Use EventsHealthStateFilter to filter the collection of health events - reported on the cluster based on the health state. - Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter - to filter the collection of nodes and applications returned based on - their aggregated health state. - Use ClusterHealthPolicies to override the health policies used to - evaluate the health. - - :param nodes_health_state_filter: Allows filtering of the node health - state objects returned in the result of cluster health query - based on their health state. The possible values for this parameter - include integer value of one of the - following health states. Only nodes that match the filter are - returned. All nodes are used to evaluate the aggregated health state. 
- If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of nodes - with HealthState value of OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type nodes_health_state_filter: int - :param applications_health_state_filter: Allows filtering of the - application health state objects returned in the result of cluster - health - query based on their health state. - The possible values for this parameter include integer value obtained - from members or bitwise operations - on members of HealthStateFilter enumeration. Only applications that - match the filter are returned. - All applications are used to evaluate the aggregated health state. If - not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of - applications with HealthState value of OK (2) and Warning (4) are - returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. 
The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type applications_health_state_filter: int - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. 
- :type exclude_health_statistics: bool - :param include_system_application_health_statistics: Indicates whether - the health statistics should include the fabric:/System application - health statistics. False by default. - If IncludeSystemApplicationHealthStatistics is set to true, the health - statistics include the entities that belong to the fabric:/System - application. - Otherwise, the query result includes health statistics only for user - applications. - The health statistics must be included in the query result for this - parameter to be applied. - :type include_system_application_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy_map: Defines a map that contains - specific application health policies for different applications. - Each entry specifies as key the application name and as value an - ApplicationHealthPolicy used to evaluate the application health. - If an application is not specified in the map, the application health - evaluation uses the ApplicationHealthPolicy found in its application - manifest or the default application health policy (if no health policy - is defined in the manifest). - The map is empty by default. - :type application_health_policy_map: - list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate - the health of the cluster or of a cluster node. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ClusterHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - cluster_health_policies = None - if application_health_policy_map is not None or cluster_health_policy is not None: - cluster_health_policies = models.ClusterHealthPolicies(application_health_policy_map=application_health_policy_map, cluster_health_policy=cluster_health_policy) - - api_version = "6.0" - - # Construct URL - url = self.get_cluster_health_using_policy.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if nodes_health_state_filter is not None: - query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') - if applications_health_state_filter is not None: - query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if include_system_application_health_statistics is not None: - query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 
'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if cluster_health_policies is not None: - body_content = self._serialize.body(cluster_health_policies, 'ClusterHealthPolicies') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'} - - def get_cluster_health_chunk( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric cluster using health chunks. - - Gets the health of a Service Fabric cluster using health chunks. - Includes the aggregated health state of the cluster, but none of the - cluster entities. - To expand the cluster health and get the health state of all or some of - the entities, use the POST URI and specify the cluster health chunk - query description. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ClusterHealthChunk or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterHealthChunk or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_health_chunk.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterHealthChunk', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} - - def get_cluster_health_chunk_using_policy_and_advanced_filters( - self, cluster_health_chunk_query_description=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric cluster using health chunks. - - Gets the health of a Service Fabric cluster using health chunks. The - health evaluation is done based on the input cluster health chunk query - description. - The query description allows users to specify health policies for - evaluating the cluster and its children. - Users can specify very flexible filters to select which cluster - entities to return. 
The selection can be done based on the entities - health state and based on the hierarchy. - The query can return multi-level children of the entities based on the - specified filters. For example, it can return one application with a - specified name, and for this application, return - only services that are in Error or Warning, and all partitions and - replicas for one of these services. - - :param cluster_health_chunk_query_description: Describes the cluster - and application health policies used to evaluate the cluster health - and the filters to select which cluster entities to be returned. - If the cluster health policy is present, it is used to evaluate the - cluster events and the cluster nodes. If not present, the health - evaluation uses the cluster health policy defined in the cluster - manifest or the default cluster health policy. - By default, each application is evaluated using its specific - application health policy, defined in the application manifest, or the - default health policy, if no policy is defined in manifest. - If the application health policy map is specified, and it has an entry - for an application, the specified application health policy - is used to evaluate the application health. - Users can specify very flexible filters to select which cluster - entities to include in response. The selection can be done based on - the entities health state and based on the hierarchy. - The query can return multi-level children of the entities based on the - specified filters. For example, it can return one application with a - specified name, and for this application, return - only services that are in Error or Warning, and all partitions and - replicas for one of these services. - :type cluster_health_chunk_query_description: - ~azure.servicefabric.models.ClusterHealthChunkQueryDescription - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ClusterHealthChunk or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterHealthChunk or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if cluster_health_chunk_query_description is not None: - body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterHealthChunk', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, 
response) - return client_raw_response - - return deserialized - get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} - - def report_cluster_health( - self, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric cluster. - - Sends a health report on a Service Fabric cluster. The report must - contain the information about the source of the health report and - property on which it is reported. - The report is sent to a Service Fabric gateway node, which forwards to - the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. - For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. - To see whether the report was applied in the health store, run - GetClusterHealth and check that the report appears in the HealthEvents - section. - - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. - If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. 
- If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. - This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_cluster_health.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response 
= self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} - - def get_provisioned_fabric_code_version_info_list( - self, code_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets a list of fabric code versions that are provisioned in a Service - Fabric cluster. - - Gets a list of information about fabric code versions that are - provisioned in the cluster. The parameter CodeVersion can be used to - optionally filter the output to only that particular version. - - :param code_version: The product version of Service Fabric. - :type code_version: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if code_version is not None: - query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[FabricCodeVersionInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} - - def get_provisioned_fabric_config_version_info_list( - self, config_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets a list of fabric config versions that are provisioned in a Service - Fabric cluster. - - Gets a list of information about fabric config versions that are - provisioned in the cluster. The parameter ConfigVersion can be used to - optionally filter the output to only that particular version. 
- - :param config_version: The config version of Service Fabric. - :type config_version: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if config_version is not None: - query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[FabricConfigVersionInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - 
return client_raw_response - - return deserialized - get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} - - def get_cluster_upgrade_progress( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the progress of the current cluster upgrade. - - Gets the current progress of the ongoing cluster upgrade. If no upgrade - is currently in progress, get the last state of the previous cluster - upgrade. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ClusterUpgradeProgressObject or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_upgrade_progress.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise 
models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterUpgradeProgressObject', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_upgrade_progress.metadata = {'url': '/$/GetUpgradeProgress'} - - def get_cluster_configuration( - self, configuration_api_version, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the Service Fabric standalone cluster configuration. - - The cluster configuration contains properties of the cluster that - include different node types on the cluster, - security configurations, fault, and upgrade domain topologies, etc. - - :param configuration_api_version: The API version of the Standalone - cluster json configuration. - :type configuration_api_version: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ClusterConfiguration or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterConfiguration or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_configuration.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterConfiguration', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'} - - def get_cluster_configuration_upgrade_status( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the cluster configuration upgrade status of a Service Fabric - standalone cluster. - - Get the cluster configuration upgrade status details of a Service - Fabric standalone cluster. - - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ClusterConfigurationUpgradeStatusInfo or ClientRawResponse if - raw=true - :rtype: - ~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_configuration_upgrade_status.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'} - - def get_upgrade_orchestration_service_state( - self, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Get the service state of Service Fabric Upgrade Orchestration Service. - - Get the service state of Service Fabric Upgrade Orchestration Service. - This API is internally used for support purposes. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: UpgradeOrchestrationServiceState or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_upgrade_orchestration_service_state.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UpgradeOrchestrationServiceState', response) - - if raw: - client_raw_response = 
ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'} - - def set_upgrade_orchestration_service_state( - self, timeout=60, service_state=None, custom_headers=None, raw=False, **operation_config): - """Update the service state of Service Fabric Upgrade Orchestration - Service. - - Update the service state of Service Fabric Upgrade Orchestration - Service. This API is internally used for support purposes. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param service_state: The state of Service Fabric Upgrade - Orchestration Service. - :type service_state: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: UpgradeOrchestrationServiceStateSummary or ClientRawResponse - if raw=true - :rtype: - ~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - upgrade_orchestration_service_state = models.UpgradeOrchestrationServiceState(service_state=service_state) - - api_version = "6.0" - - # Construct URL - url = self.set_upgrade_orchestration_service_state.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'} - - def provision_cluster( - self, timeout=60, code_file_path=None, cluster_manifest_file_path=None, custom_headers=None, raw=False, **operation_config): - """Provision the code or 
configuration packages of a Service Fabric - cluster. - - Validate and provision the code or configuration packages of a Service - Fabric cluster. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param code_file_path: The cluster code package file path. - :type code_file_path: str - :param cluster_manifest_file_path: The cluster manifest file path. - :type cluster_manifest_file_path: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - provision_fabric_description = models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path) - - api_version = "6.0" - - # Construct URL - url = self.provision_cluster.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(provision_fabric_description, 'ProvisionFabricDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, 
stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - provision_cluster.metadata = {'url': '/$/Provision'} - - def unprovision_cluster( - self, timeout=60, code_version=None, config_version=None, custom_headers=None, raw=False, **operation_config): - """Unprovision the code or configuration packages of a Service Fabric - cluster. - - It is supported to unprovision code and configuration separately. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param code_version: The cluster code package version. - :type code_version: str - :param config_version: The cluster manifest version. - :type config_version: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - unprovision_fabric_description = models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version) - - api_version = "6.0" - - # Construct URL - url = self.unprovision_cluster.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(unprovision_fabric_description, 'UnprovisionFabricDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - unprovision_cluster.metadata = {'url': '/$/Unprovision'} - - def rollback_cluster_upgrade( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Roll back the upgrade of a Service Fabric cluster. - - Roll back the code or configuration upgrade of a Service Fabric - cluster. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.rollback_cluster_upgrade.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'} - - def resume_cluster_upgrade( - self, upgrade_domain, timeout=60, custom_headers=None, raw=False, **operation_config): - """Make the cluster upgrade move on to the next upgrade domain. - - Make the cluster code or configuration upgrade move on to the next - upgrade domain if appropriate. - - :param upgrade_domain: The next upgrade domain for this cluster - upgrade. - :type upgrade_domain: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - resume_cluster_upgrade_description = models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain) - - api_version = "6.0" - - # Construct URL - url = self.resume_cluster_upgrade.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'} - - def start_cluster_upgrade( - self, start_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Start upgrading the code or configuration version of a Service Fabric - cluster. 
- - Validate the supplied upgrade parameters and start upgrading the code - or configuration version of a Service Fabric cluster if the parameters - are valid. - - :param start_cluster_upgrade_description: Describes the parameters for - starting a cluster upgrade. - :type start_cluster_upgrade_description: - ~azure.servicefabric.models.StartClusterUpgradeDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_cluster_upgrade.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise 
models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_cluster_upgrade.metadata = {'url': '/$/Upgrade'} - - def start_cluster_configuration_upgrade( - self, cluster_configuration_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Start upgrading the configuration of a Service Fabric standalone - cluster. - - Validate the supplied configuration upgrade parameters and start - upgrading the cluster configuration if the parameters are valid. - - :param cluster_configuration_upgrade_description: Parameters for a - standalone cluster configuration upgrade. - :type cluster_configuration_upgrade_description: - ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_cluster_configuration_upgrade.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'} - - def update_cluster_upgrade( - self, update_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Update the upgrade parameters of a Service Fabric cluster upgrade. - - Update the upgrade parameters used during a Service Fabric cluster - upgrade. - - :param update_cluster_upgrade_description: Parameters for updating a - cluster upgrade. - :type update_cluster_upgrade_description: - ~azure.servicefabric.models.UpdateClusterUpgradeDescription - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.update_cluster_upgrade.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - update_cluster_upgrade.metadata = {'url': '/$/UpdateUpgrade'} - - def get_aad_metadata( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the Azure Active Directory metadata used for secured connection to - cluster. 
- - Gets the Azure Active Directory metadata used for secured connection to - cluster. - This API is not supposed to be called separately. It provides - information needed to set up an Azure Active Directory secured - connection with a Service Fabric cluster. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: AadMetadataObject or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.AadMetadataObject or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_aad_metadata.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('AadMetadataObject', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - 
return client_raw_response - - return deserialized - get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'} - - def get_cluster_version( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the current Service Fabric cluster version. - - If a cluster upgrade is happening, then this API will return the lowest - (older) version of the current and target cluster runtime versions. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ClusterVersion or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterVersion or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_cluster_version.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - 
deserialized = self._deserialize('ClusterVersion', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_version.metadata = {'url': '/$/GetClusterVersion'} - - def get_cluster_load( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the load of a Service Fabric cluster. - - Retrieves the load information of a Service Fabric cluster for all the - metrics that have load or capacity defined. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ClusterLoadInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ClusterLoadInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_cluster_load.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ClusterLoadInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_cluster_load.metadata = {'url': '/$/GetLoadInformation'} - - def toggle_verbose_service_placement_health_reporting( - self, enabled, timeout=60, custom_headers=None, raw=False, **operation_config): - """Changes the verbosity of service placement health reporting. - - If verbosity is set to true, then detailed health reports will be - generated when replicas cannot be placed or dropped. - If verbosity is set to false, then no health reports will be generated - when replicas cannot be placed or dropped. - - :param enabled: The verbosity of service placement health reporting. - :type enabled: bool - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.toggle_verbose_service_placement_health_reporting.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Enabled'] = self._serialize.query("enabled", enabled, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - toggle_verbose_service_placement_health_reporting.metadata = {'url': '/$/ToggleVerboseServicePlacementHealthReporting'} - - def get_node_info_list( - self, continuation_token=None, node_status_filter="default", max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of nodes in the Service Fabric cluster. 
- - The response includes the name, status, ID, health, uptime, and other - details about the nodes. - - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param node_status_filter: Allows filtering the nodes based on the - NodeStatus. Only the nodes that are matching the specified filter - value will be returned. The filter value can be one of the following. - Possible values include: 'default', 'all', 'up', 'down', 'enabling', - 'disabling', 'disabled', 'unknown', 'removed' - :type node_status_filter: str or - ~azure.servicefabric.models.NodeStatusFilter - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedNodeInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedNodeInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.3" - - # Construct URL - url = self.get_node_info_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if node_status_filter is not None: - query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str') - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedNodeInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - 
get_node_info_list.metadata = {'url': '/Nodes'} - - def get_node_info( - self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about a specific node in the Service Fabric - cluster. - - The response includes the name, status, ID, health, uptime, and other - details about the node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: NodeInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NodeInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_node_info.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in 
[200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_node_info.metadata = {'url': '/Nodes/{nodeName}'} - - def get_node_health( - self, node_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric node. - - Gets the health of a Service Fabric node. Use EventsHealthStateFilter - to filter the collection of health events reported on the node based on - the health state. If the node that you specify by name does not exist - in the health store, this returns an error. - - :param node_name: The name of the node. - :type node_name: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. 
- - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: NodeHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NodeHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_node_health.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in 
[200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} - - def get_node_health_using_policy( - self, node_name, events_health_state_filter=0, cluster_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric node, by using the specified health - policy. - - Gets the health of a Service Fabric node. Use EventsHealthStateFilter - to filter the collection of health events reported on the node based on - the health state. Use ClusterHealthPolicy in the POST body to override - the health policies used to evaluate the health. If the node that you - specify by name does not exist in the health store, this returns an - error. - - :param node_name: The name of the node. - :type node_name: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. 
- - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param cluster_health_policy: Describes the health policies used to - evaluate the health of a cluster or node. If not present, the health - evaluation uses the health policy from cluster manifest or the default - health policy. - :type cluster_health_policy: - ~azure.servicefabric.models.ClusterHealthPolicy - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: NodeHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NodeHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_node_health_using_policy.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if cluster_health_policy is not None: - body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} - - def report_node_health( - self, 
node_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric node. - - Reports health state of the specified Service Fabric node. The report - must contain the information about the source of the health report and - property on which it is reported. - The report is sent to a Service Fabric gateway node, which forwards to - the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. - For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. - To see whether the report was applied in the health store, run - GetNodeHealth and check that the report appears in the HealthEvents - section. - - :param node_name: The name of the node. - :type node_name: str - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. - If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. - If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. 
- This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_node_health.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, 
**operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'} - - def get_node_load_info( - self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the load information of a Service Fabric node. - - Retrieves the load information of a Service Fabric node for all the - metrics that have load or capacity defined. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: NodeLoadInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NodeLoadInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_node_load_info.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeLoadInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} - - def disable_node( - self, node_name, timeout=60, deactivation_intent=None, custom_headers=None, raw=False, **operation_config): - """Deactivate a Service Fabric cluster node with the specified - deactivation intent. - - Deactivate a Service Fabric cluster node with the specified - deactivation intent. 
Once the deactivation is in progress, the - deactivation intent can be increased, but not decreased (for example, a - node that is deactivated with the Pause intent can be deactivated - further with Restart, but not the other way around. Nodes may be - reactivated using the Activate a node operation any time after they are - deactivated. If the deactivation is not complete, this will cancel the - deactivation. A node that goes down and comes back up while deactivated - will still need to be reactivated before services will be placed on - that node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param deactivation_intent: Describes the intent or reason for - deactivating the node. The possible values are following. Possible - values include: 'Pause', 'Restart', 'RemoveData' - :type deactivation_intent: str or - ~azure.servicefabric.models.DeactivationIntent - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - deactivation_intent_description = models.DeactivationIntentDescription(deactivation_intent=deactivation_intent) - - api_version = "6.0" - - # Construct URL - url = self.disable_node.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(deactivation_intent_description, 'DeactivationIntentDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'} - - def enable_node( - self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Activate a Service Fabric cluster node that is currently deactivated. - - Activates a Service Fabric cluster node that is currently deactivated. - Once activated, the node will again become a viable target for placing - new replicas, and any deactivated replicas remaining on the node will - be reactivated. 
- - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.enable_node.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - enable_node.metadata = {'url': '/Nodes/{nodeName}/$/Activate'} - - def remove_node_state( - self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Notifies Service Fabric that the persisted state on a node has been - 
permanently removed or lost. - - This implies that it is not possible to recover the persisted state of - that node. This generally happens if a hard disk has been wiped clean, - or if a hard disk crashes. The node has to be down for this operation - to be successful. This operation lets Service Fabric know that the - replicas on that node no longer exist, and that Service Fabric should - stop waiting for those replicas to come back up. Do not run this cmdlet - if the state on the node has not been removed and the node can come - back up with its state intact. Starting from Service Fabric 6.5, in - order to use this API for seed nodes, please change the seed nodes to - regular (non-seed) nodes and then invoke this API to remove the node - state. If the cluster is running on Azure, after the seed node goes - down, Service Fabric will try to change it to a non-seed node - automatically. To make this happen, make sure the number of non-seed - nodes in the primary node type is no less than the number of Down seed - nodes. If necessary, add more nodes to the primary node type to achieve - this. For standalone cluster, if the Down seed node is not expected to - come back up with its state intact, please remove the node from the - cluster, see - https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-windows-server-add-remove-nodes. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.remove_node_state.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'} - - def restart_node( - self, node_name, node_instance_id="0", timeout=60, create_fabric_dump="False", custom_headers=None, raw=False, **operation_config): - """Restarts a Service Fabric cluster node. - - Restarts a Service Fabric cluster node that is already started. - - :param node_name: The name of the node. - :type node_name: str - :param node_instance_id: The instance ID of the target node. If - instance ID is specified the node is restarted only if it matches with - the current instance of the node. A default value of "0" would match - any instance ID. The instance ID can be obtained using get node query. - :type node_instance_id: str - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param create_fabric_dump: Specify True to create a dump of the fabric - node process. This is case-sensitive. Possible values include: - 'False', 'True' - :type create_fabric_dump: str or - ~azure.servicefabric.models.CreateFabricDump - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - restart_node_description = models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump) - - api_version = "6.0" - - # Construct URL - url = self.restart_node.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(restart_node_description, 'RestartNodeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in 
[200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'} - - def remove_configuration_overrides( - self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Removes configuration overrides on the specified node. - - This api allows removing all existing configuration overrides on - specified node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "7.0" - - # Construct URL - url = self.remove_configuration_overrides.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - remove_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/RemoveConfigurationOverrides'} - - def get_configuration_overrides( - self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of configuration overrides on the specified node. - - This api allows getting all existing configuration overrides on the - specified node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ConfigParameterOverride] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "7.0" - - # Construct URL - url = self.get_configuration_overrides.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[ConfigParameterOverride]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/GetConfigurationOverrides'} - - def add_configuration_parameter_overrides( - self, node_name, config_parameter_override_list, force=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Adds the 
list of configuration overrides on the specified node. - - This api allows adding all existing configuration overrides on the - specified node. - - :param node_name: The name of the node. - :type node_name: str - :param config_parameter_override_list: Description for adding list of - configuration overrides. - :type config_parameter_override_list: - list[~azure.servicefabric.models.ConfigParameterOverride] - :param force: Force adding configuration overrides on specified nodes. - :type force: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "7.0" - - # Construct URL - url = self.add_configuration_parameter_overrides.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force is not None: - query_parameters['Force'] = self._serialize.query("force", force, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(config_parameter_override_list, '[ConfigParameterOverride]') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'} - - def get_application_type_info_list( - self, application_type_definition_kind_filter=0, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of application types in the Service Fabric cluster. 
- - Returns the information about the application types that are - provisioned or in the process of being provisioned in the Service - Fabric cluster. Each version of an application type is returned as one - application type. The response includes the name, version, status, and - other details about the application type. This is a paged query, - meaning that if not all of the application types fit in a page, one - page of results is returned as well as a continuation token, which can - be used to get the next page. For example, if there are 10 application - types but a page only fits the first three application types, or if max - results is set to 3, then three is returned. To access the rest of the - results, retrieve subsequent pages by using the returned continuation - token in the next query. An empty continuation token is returned if - there are no subsequent pages. - - :param application_type_definition_kind_filter: Used to filter on - ApplicationTypeDefinitionKind which is the mechanism used to define a - Service Fabric application type. - - Default - Default value, which performs the same function as - selecting "All". The value is 0. - - All - Filter that matches input with any - ApplicationTypeDefinitionKind value. The value is 65535. - - ServiceFabricApplicationPackage - Filter that matches input with - ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage. - The value is 1. - - Compose - Filter that matches input with - ApplicationTypeDefinitionKind value Compose. The value is 2. - :type application_type_definition_kind_filter: int - :param exclude_application_parameters: The flag that specifies whether - application parameters will be excluded from the result. - :type exclude_application_parameters: bool - :param continuation_token: The continuation token parameter is used to - obtain next set of results. 
A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedApplicationTypeInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_type_info_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_type_definition_kind_filter is not None: - query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedApplicationTypeInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, 
response) - return client_raw_response - - return deserialized - get_application_type_info_list.metadata = {'url': '/ApplicationTypes'} - - def get_application_type_info_list_by_name( - self, application_type_name, application_type_version=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of application types in the Service Fabric cluster - matching exactly the specified name. - - Returns the information about the application types that are - provisioned or in the process of being provisioned in the Service - Fabric cluster. These results are of application types whose name match - exactly the one specified as the parameter, and which comply with the - given query parameters. All versions of the application type matching - the application type name are returned, with each version returned as - one application type. The response includes the name, version, status, - and other details about the application type. This is a paged query, - meaning that if not all of the application types fit in a page, one - page of results is returned as well as a continuation token, which can - be used to get the next page. For example, if there are 10 application - types but a page only fits the first three application types, or if max - results is set to 3, then three is returned. To access the rest of the - results, retrieve subsequent pages by using the returned continuation - token in the next query. An empty continuation token is returned if - there are no subsequent pages. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param exclude_application_parameters: The flag that specifies whether - application parameters will be excluded from the result. 
- :type exclude_application_parameters: bool - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedApplicationTypeInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_type_info_list_by_name.metadata['url'] - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_type_version is not None: - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - 
deserialized = self._deserialize('PagedApplicationTypeInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'} - - def provision_application_type( - self, provision_application_type_description_base_required_body_param, timeout=60, custom_headers=None, raw=False, **operation_config): - """Provisions or registers a Service Fabric application type with the - cluster using the '.sfpkg' package in the external store or using the - application package in the image store. - - Provisions a Service Fabric application type with the cluster. The - provision is required before any new applications can be instantiated. - The provision operation can be performed either on the application - package specified by the relativePathInImageStore, or by using the URI - of the external '.sfpkg'. - - :param - provision_application_type_description_base_required_body_param: The - base type of provision application type description which supports - either image store-based provision or external store-based provision. - :type provision_application_type_description_base_required_body_param: - ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.2" - - # Construct URL - url = self.provision_application_type.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'} - - def unprovision_application_type( - self, application_type_name, application_type_version, timeout=60, async_parameter=None, custom_headers=None, raw=False, **operation_config): - """Removes or unregisters a Service Fabric application type from the - cluster. - - This operation can only be performed if all application instances of - the application type have been deleted. Once the application type is - unregistered, no new application instances can be created for this - particular application type. - - :param application_type_name: The name of the application type. 
- :type application_type_name: str - :param application_type_version: The version of the application type - as defined in the application manifest. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param async_parameter: The flag indicating whether or not unprovision - should occur asynchronously. When set to true, the unprovision - operation returns when the request is accepted by the system, and the - unprovision operation continues without any timeout limit. The default - value is false. However, we recommend setting it to true for large - application packages that were provisioned. - :type async_parameter: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - unprovision_application_type_description_info = models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter) - - api_version = "6.0" - - # Construct URL - url = self.unprovision_application_type.metadata['url'] - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'} - - def get_service_type_info_list( - self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list containing the information about service types that are - 
supported by a provisioned application type in a Service Fabric - cluster. - - Gets the list containing the information about service types that are - supported by a provisioned application type in a Service Fabric - cluster. The provided application type must exist. Otherwise, a 404 - status is returned. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ServiceTypeInfo] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_type_info_list.metadata['url'] - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[ServiceTypeInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'} - - def get_service_type_info_by_name( - self, application_type_name, application_type_version, service_type_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about a specific service type that is supported by - a provisioned 
application type in a Service Fabric cluster. - - Gets the information about a specific service type that is supported by - a provisioned application type in a Service Fabric cluster. The - provided application type must exist. Otherwise, a 404 status is - returned. A 204 response is returned if the specified service type is - not found in the cluster. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param service_type_name: Specifies the name of a Service Fabric - service type. - :type service_type_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ServiceTypeInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceTypeInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_type_info_by_name.metadata['url'] - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceTypeInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} - - def get_service_manifest( - self, application_type_name, application_type_version, service_manifest_name, timeout=60, custom_headers=None, 
raw=False, **operation_config): - """Gets the manifest describing a service type. - - Gets the manifest describing a service type. The response contains the - service manifest XML as a string. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param service_manifest_name: The name of a service manifest - registered as part of an application type in a Service Fabric cluster. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ServiceTypeManifest or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceTypeManifest or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_manifest.metadata['url'] - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceTypeManifest', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'} - - def get_deployed_service_type_info_list( - self, node_name, application_id, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Gets the list containing the information about service types from the - applications deployed on a node in a Service Fabric cluster. - - Gets the list containing the information about service types from the - applications deployed on a node in a Service Fabric cluster. The - response includes the name of the service type, its registration - status, the code package that registered it and activation ID of the - service package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_manifest_name: The name of the service manifest to - filter the list of deployed service type information. If specified, - the response will only contain the information about service types - that are defined in this service manifest. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_type_info_list.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceTypeInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'} - - def get_deployed_service_type_info_by_name( - self, node_name, application_id, service_type_name, service_manifest_name=None, timeout=60, 
custom_headers=None, raw=False, **operation_config): - """Gets the information about a specified service type of the application - deployed on a node in a Service Fabric cluster. - - Gets the list containing the information about a specific service type - from the applications deployed on a node in a Service Fabric cluster. - The response includes the name of the service type, its registration - status, the code package that registered it and activation ID of the - service package. Each entry represents one activation of a service - type, differentiated by the activation ID. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_type_name: Specifies the name of a Service Fabric - service type. - :type service_type_name: str - :param service_manifest_name: The name of the service manifest to - filter the list of deployed service type information. If specified, - the response will only contain the information about service types - that are defined in this service manifest. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_type_info_by_name.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceTypeInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'} - - def 
create_application( - self, application_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates a Service Fabric application. - - Creates a Service Fabric application using the specified description. - - :param application_description: Description for creating an - application. - :type application_description: - ~azure.servicefabric.models.ApplicationDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.create_application.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(application_description, 'ApplicationDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise 
models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - create_application.metadata = {'url': '/Applications/$/Create'} - - def delete_application( - self, application_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes an existing Service Fabric application. - - An application must be created before it can be deleted. Deleting an - application will delete all services that are part of that application. - By default, Service Fabric will try to close service replicas in a - graceful manner and then delete the service. However, if a service is - having issues closing the replica gracefully, the delete operation may - take a long time or get stuck. Use the optional ForceRemove flag to - skip the graceful close sequence and forcefully delete the application - and all of its services. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param force_remove: Remove a Service Fabric application or service - forcefully without going through the graceful shutdown sequence. This - parameter can be used to forcefully delete an application or service - for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.delete_application.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'} - - def get_application_load_info( - self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets load information about a Service Fabric application. 
- - Returns the load information about the application that was created or - in the process of being created in the Service Fabric cluster and whose - name matches the one specified as the parameter. The response includes - the name, minimum nodes, maximum nodes, the number of nodes the - application is occupying currently, and application load metric - information about the application. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ApplicationLoadInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ApplicationLoadInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_load_info.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationLoadInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'} - - def get_application_info_list( - self, application_definition_kind_filter=0, application_type_name=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of applications created in the Service Fabric cluster - that match the specified filters. 
- - Gets the information about the applications that were created or in the - process of being created in the Service Fabric cluster and match the - specified filters. The response includes the name, type, status, - parameters, and other details about the application. If the - applications do not fit in a page, one page of results is returned as - well as a continuation token, which can be used to get the next page. - Filters ApplicationTypeName and ApplicationDefinitionKindFilter cannot - be specified at the same time. - - :param application_definition_kind_filter: Used to filter on - ApplicationDefinitionKind, which is the mechanism used to define a - Service Fabric application. - - Default - Default value, which performs the same function as - selecting "All". The value is 0. - - All - Filter that matches input with any ApplicationDefinitionKind - value. The value is 65535. - - ServiceFabricApplicationDescription - Filter that matches input with - ApplicationDefinitionKind value ServiceFabricApplicationDescription. - The value is 1. - - Compose - Filter that matches input with ApplicationDefinitionKind - value Compose. The value is 2. - :type application_definition_kind_filter: int - :param application_type_name: The application type name used to filter - the applications to query for. This value should not contain the - application type version. - :type application_type_name: str - :param exclude_application_parameters: The flag that specifies whether - application parameters will be excluded from the result. - :type exclude_application_parameters: bool - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. 
If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedApplicationInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedApplicationInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.1" - - # Construct URL - url = self.get_application_info_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_definition_kind_filter is not None: - query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int') - if application_type_name is not None: - query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('PagedApplicationInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_info_list.metadata = {'url': '/Applications'} - - def get_application_info( - self, application_id, exclude_application_parameters=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets information about a Service Fabric application. - - Returns the information about the application that was created or in - the process of being created in the Service Fabric cluster and whose - name matches the one specified as the parameter. The response includes - the name, type, status, parameters, and other details about the - application. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param exclude_application_parameters: The flag that specifies whether - application parameters will be excluded from the result. - :type exclude_application_parameters: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ApplicationInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ApplicationInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_info.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_info.metadata = {'url': '/Applications/{applicationId}'} - - def get_application_health( - self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Gets the health of the service fabric application. - - Returns the heath state of the service fabric application. The response - reports either Ok, Error or Warning health state. If the entity is not - found in the health store, it will return Error. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. 
- :type events_health_state_filter: int - :param deployed_applications_health_state_filter: Allows filtering of - the deployed applications health state objects returned in the result - of application health query based on their health state. - The possible values for this parameter include integer value of one of - the following health states. Only deployed applications that match the - filter will be returned. - All deployed applications are used to evaluate the aggregated health - state. If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a - combination of these values, obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of deployed - applications with HealthState value of OK (2) and Warning (4) are - returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type deployed_applications_health_state_filter: int - :param services_health_state_filter: Allows filtering of the services - health state objects returned in the result of services health query - based on their health state. - The possible values for this parameter include integer value of one of - the following health states. - Only services that match the filter are returned. All services are - used to evaluate the aggregated health state. - If not specified, all entries are returned. 
The state values are - flag-based enumeration, so the value could be a combination of these - values, - obtained using bitwise 'OR' operator. For example, if the provided - value is 6 then health state of services with HealthState value of OK - (2) and Warning (4) will be returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type services_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ApplicationHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ApplicationHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_health.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_applications_health_state_filter is not None: - query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') - if services_health_state_filter is not None: - query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, 
response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} - - def get_application_health_using_policy( - self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric application using the specified - policy. - - Gets the health of a Service Fabric application. Use - EventsHealthStateFilter to filter the collection of health events - reported on the node based on the health state. Use - ClusterHealthPolicies to override the health policies used to evaluate - the health. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. 
For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param deployed_applications_health_state_filter: Allows filtering of - the deployed applications health state objects returned in the result - of application health query based on their health state. - The possible values for this parameter include integer value of one of - the following health states. Only deployed applications that match the - filter will be returned. - All deployed applications are used to evaluate the aggregated health - state. If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a - combination of these values, obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of deployed - applications with HealthState value of OK (2) and Warning (4) are - returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. 
The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type deployed_applications_health_state_filter: int - :param services_health_state_filter: Allows filtering of the services - health state objects returned in the result of services health query - based on their health state. - The possible values for this parameter include integer value of one of - the following health states. - Only services that match the filter are returned. All services are - used to evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, - obtained using bitwise 'OR' operator. For example, if the provided - value is 6 then health state of services with HealthState value of OK - (2) and Warning (4) will be returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type services_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param application_health_policy: Describes the health policies used - to evaluate the health of an application or one of its children. 
- If not present, the health evaluation uses the health policy from - application manifest or the default health policy. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ApplicationHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ApplicationHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_health_using_policy.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_applications_health_state_filter is not None: - query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') - if services_health_state_filter is not None: - query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') - if exclude_health_statistics 
is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} - - def report_application_health( - self, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric application. - - Reports health state of the specified Service Fabric application. The - report must contain the information about the source of the health - report and property on which it is reported. - The report is sent to a Service Fabric gateway Application, which - forwards to the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. 
- For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. - To see whether the report was applied in the health store, get - application health and check that the report appears in the - HealthEvents section. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. - If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. - If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. - This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. 
- By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_application_health.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - 
- if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} - - def start_application_upgrade( - self, application_id, application_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Starts upgrading an application in the Service Fabric cluster. - - Validates the supplied application upgrade parameters and starts - upgrading the application if the parameters are valid. - Note, - [ApplicationParameter](https://docs.microsoft.com/dotnet/api/system.fabric.description.applicationdescription.applicationparameters)s - are not preserved across an application upgrade. - In order to preserve current application parameters, the user should - get the parameters using [GetApplicationInfo](./GetApplicationInfo.md) - operation first and pass them into the upgrade API call as shown in the - example. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param application_upgrade_description: Parameters for an application - upgrade. - :type application_upgrade_description: - ~azure.servicefabric.models.ApplicationUpgradeDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_application_upgrade.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(application_upgrade_description, 'ApplicationUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} - - def get_application_upgrade( - self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets details for the latest upgrade performed on this application. 
- - Returns information about the state of the latest application upgrade - along with details to aid debugging application health issues. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ApplicationUpgradeProgressInfo or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_upgrade.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationUpgradeProgressInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} - - def update_application_upgrade( - self, application_id, application_upgrade_update_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Updates an ongoing application upgrade in the Service Fabric cluster. - - Updates the parameters of an ongoing application upgrade from the ones - specified at the time of starting the application upgrade. 
This may be - required to mitigate stuck application upgrades due to incorrect - parameters or issues in the application to make progress. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param application_upgrade_update_description: Parameters for updating - an existing application upgrade. - :type application_upgrade_update_description: - ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.update_application_upgrade.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} - - def resume_application_upgrade( - self, application_id, upgrade_domain_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Resumes upgrading an application in the Service Fabric cluster. - - Resumes an unmonitored manual Service Fabric application upgrade. - Service Fabric upgrades one upgrade domain at a time. 
For unmonitored - manual upgrades, after Service Fabric finishes an upgrade domain, it - waits for you to call this API before proceeding to the next upgrade - domain. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param upgrade_domain_name: The name of the upgrade domain in which to - resume the upgrade. - :type upgrade_domain_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - resume_application_upgrade_description = models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name) - - api_version = "6.0" - - # Construct URL - url = self.resume_application_upgrade.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} - - def rollback_application_upgrade( - self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Starts rolling back the currently on-going upgrade of an application in - the Service Fabric cluster. - - Starts rolling back the current application upgrade to the previous - version. 
This API can only be used to roll back the current in-progress - upgrade that is rolling forward to new version. If the application is - not currently being upgraded use StartApplicationUpgrade API to upgrade - it to desired version, including rolling back to a previous version. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.rollback_application_upgrade.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} - - def get_deployed_application_info_list( - self, node_name, timeout=60, include_health_state=False, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): - """Gets the list of applications deployed on a Service Fabric node. - - Gets the list of applications deployed on a Service Fabric node. The - results do not include information about deployed system applications - unless explicitly queried for by ID. Results encompass deployed - applications in active, activating, and downloading states. This query - requires that the node name corresponds to a node on the cluster. 
The - query fails if the provided node name does not point to any active - Service Fabric nodes on the cluster. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param include_health_state: Include the health state of an entity. - If this parameter is false or not specified, then the health state - returned is "Unknown". - When set to true, the query goes in parallel to the node and the - health system service before the results are merged. - As a result, the query is more expensive and may take a longer time. - :type include_health_state: bool - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. 
- :type max_results: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedDeployedApplicationInfoList or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.1" - - # Construct URL - url = self.get_deployed_application_info_list.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if include_health_state is not None: - query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code 
== 200: - deserialized = self._deserialize('PagedDeployedApplicationInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} - - def get_deployed_application_info( - self, node_name, application_id, timeout=60, include_health_state=False, custom_headers=None, raw=False, **operation_config): - """Gets the information about an application deployed on a Service Fabric - node. - - This query returns system application information if the application ID - provided is for system application. Results encompass deployed - applications in active, activating, and downloading states. This query - requires that the node name corresponds to a node on the cluster. The - query fails if the provided node name does not point to any active - Service Fabric nodes on the cluster. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param include_health_state: Include the health state of an entity. - If this parameter is false or not specified, then the health state - returned is "Unknown". 
- When set to true, the query goes in parallel to the node and the - health system service before the results are merged. - As a result, the query is more expensive and may take a longer time. - :type include_health_state: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: DeployedApplicationInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.DeployedApplicationInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.1" - - # Construct URL - url = self.get_deployed_application_info.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if include_health_state is not None: - query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - 
deserialized = self._deserialize('DeployedApplicationInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} - - def get_deployed_application_health( - self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about health of an application deployed on a - Service Fabric node. - - Gets the information about health of an application deployed on a - Service Fabric node. Use EventsHealthStateFilter to optionally filter - for the collection of HealthEvent objects reported on the deployed - application based on health state. Use - DeployedServicePackagesHealthStateFilter to optionally filter for - DeployedServicePackageHealth children based on health state. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. 
The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param deployed_service_packages_health_state_filter: Allows filtering - of the deployed service package health state objects returned in the - result of deployed application health query based on their health - state. - The possible values for this parameter include integer value of one of - the following health states. - Only deployed service packages that match the filter are returned. All - deployed service packages are used to evaluate the aggregated health - state of the deployed application. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value can be a - combination of these values, obtained using the bitwise 'OR' operator. - For example, if the provided value is 6 then health state of service - packages with HealthState value of OK (2) and Warning (4) are - returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. 
- - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type deployed_service_packages_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: DeployedApplicationHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.DeployedApplicationHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_application_health.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_service_packages_health_state_filter is not None: - query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized 
= self._deserialize('DeployedApplicationHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} - - def get_deployed_application_health_using_policy( - self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about health of an application deployed on a - Service Fabric node. using the specified policy. - - Gets the information about health of an application deployed on a - Service Fabric node using the specified policy. Use - EventsHealthStateFilter to optionally filter for the collection of - HealthEvent objects reported on the deployed application based on - health state. Use DeployedServicePackagesHealthStateFilter to - optionally filter for DeployedServicePackageHealth children based on - health state. Use ApplicationHealthPolicy to optionally override the - health policies used to evaluate the health. This API only uses - 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest - of the fields are ignored while evaluating the health of the deployed - application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. 
- :type application_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param deployed_service_packages_health_state_filter: Allows filtering - of the deployed service package health state objects returned in the - result of deployed application health query based on their health - state. - The possible values for this parameter include integer value of one of - the following health states. - Only deployed service packages that match the filter are returned. All - deployed service packages are used to evaluate the aggregated health - state of the deployed application. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value can be a - combination of these values, obtained using the bitwise 'OR' operator. 
- For example, if the provided value is 6 then health state of service - packages with HealthState value of OK (2) and Warning (4) are - returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type deployed_service_packages_health_state_filter: int - :param application_health_policy: Describes the health policies used - to evaluate the health of an application or one of its children. - If not present, the health evaluation uses the health policy from - application manifest or the default health policy. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: DeployedApplicationHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.DeployedApplicationHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_application_health_using_policy.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_service_packages_health_state_filter is not None: - query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, 
header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedApplicationHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} - - def report_deployed_application_health( - self, node_name, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric application deployed on a - Service Fabric node. - - Reports health state of the application deployed on a Service Fabric - node. The report must contain the information about the source of the - health report and property on which it is reported. - The report is sent to a Service Fabric gateway Service, which forwards - to the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. - For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. - To see whether the report was applied in the health store, get deployed - application health and check that the report appears in the - HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. 
- For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. - If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. - If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. - This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_deployed_application_health.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} - - def 
get_application_manifest( - self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the manifest describing an application type. - - The response contains the application manifest XML as a string. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ApplicationTypeManifest or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ApplicationTypeManifest or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_manifest.metadata['url'] - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 
'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationTypeManifest', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} - - def get_service_info_list( - self, application_id, service_type_name=None, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about all services belonging to the application - specified by the application ID. - - Returns the information about all services belonging to the application - specified by the application ID. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_type_name: The service type name used to filter the - services to query for. - :type service_type_name: str - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. 
When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedServiceInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedServiceInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_info_list.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if service_type_name is not None: - query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - 
header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedServiceInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} - - def get_service_info( - self, application_id, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about the specific service belonging to the - Service Fabric application. - - Returns the information about the specified service belonging to the - specified Service Fabric application. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ServiceInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_info.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_info.metadata = 
{'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} - - def get_application_name_info( - self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the name of the Service Fabric application for a service. - - Gets the name of the application for the specified service. A 404 - FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the - provided service ID does not exist. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ApplicationNameInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ApplicationNameInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_application_name_info.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationNameInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_name_info.metadata = {'url': '/Services/{serviceId}/$/GetApplicationName'} - - def create_service( - self, application_id, service_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates the specified Service Fabric service. - - This api allows creating a new Service Fabric stateless or stateful - service under a specified Service Fabric application. 
The description - for creating the service includes partitioning information and optional - properties for placement and load balancing. Some of the properties can - later be modified using `UpdateService` API. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_description: The information necessary to create a - service. - :type service_description: - ~azure.servicefabric.models.ServiceDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.create_service.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(service_description, 'ServiceDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} - - def create_service_from_template( - self, application_id, service_from_template_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates a Service Fabric service from the service template. - - Creates a Service Fabric service from the service template defined in - the application manifest. A service template contains the properties - that will be same for the service instance of the same type. 
The API - allows overriding the properties that are usually different for - different services of the same service type. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_from_template_description: Describes the service that - needs to be created from the template defined in the application - manifest. - :type service_from_template_description: - ~azure.servicefabric.models.ServiceFromTemplateDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.create_service_from_template.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} - - def delete_service( - self, service_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes an existing Service Fabric service. - - A service must be created before it can be deleted. By default, Service - Fabric will try to close service replicas in a graceful manner and then - delete the service. 
However, if the service is having issues closing - the replica gracefully, the delete operation may take a long time or - get stuck. Use the optional ForceRemove flag to skip the graceful close - sequence and forcefully delete the service. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param force_remove: Remove a Service Fabric application or service - forcefully without going through the graceful shutdown sequence. This - parameter can be used to forcefully delete an application or service - for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.delete_service.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} - - def update_service( - self, service_id, service_update_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Updates a Service Fabric service using the specified update - description. - - This API allows updating properties of a running Service Fabric - service. The set of properties that can be updated are a subset of the - properties that were specified at the time of creating the service. The - current set of properties can be obtained using `GetServiceDescription` - API. 
Note that updating the properties of a running service is - different than upgrading your application using - `StartApplicationUpgrade` API. The upgrade is a long running background - operation that involves moving the application from one version to - another, one upgrade domain at a time, whereas update applies the new - properties immediately to the service. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param service_update_description: The information necessary to update - a service. - :type service_update_description: - ~azure.servicefabric.models.ServiceUpdateDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.update_service.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} - - def get_service_description( - self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the description of an existing Service Fabric service. - - Gets the description of an existing Service Fabric service. A service - must be created before its description can be obtained. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. 
- Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ServiceDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_description.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) 
- - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceDescription', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_description.metadata = {'url': '/Services/{serviceId}/$/GetDescription'} - - def get_service_health( - self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of the specified Service Fabric service. - - Gets the health information of the specified service. - Use EventsHealthStateFilter to filter the collection of health events - reported on the service based on the health state. - Use PartitionsHealthStateFilter to filter the collection of partitions - returned. - If you specify a service that does not exist in the health store, this - request returns an error. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. 
For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param partitions_health_state_filter: Allows filtering of the - partitions health state objects returned in the result of service - health query based on their health state. - The possible values for this parameter include integer value of one of - the following health states. - Only partitions that match the filter are returned. All partitions are - used to evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - value - obtained using bitwise 'OR' operator. For example, if the provided - value is 6 then health state of partitions with HealthState value of - OK (2) and Warning (4) will be returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. 
- - All - Filter that matches input with any HealthState value. The - value is 65535. - :type partitions_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ServiceHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_health.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if partitions_health_state_filter is not None: - query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = 
self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} - - def get_service_health_using_policy( - self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of the specified Service Fabric service, by using the - specified health policy. - - Gets the health information of the specified service. - If the application health policy is specified, the health evaluation - uses it to get the aggregated health state. - If the policy is not specified, the health evaluation uses the - application health policy defined in the application manifest, or the - default health policy, if no policy is defined in the manifest. - Use EventsHealthStateFilter to filter the collection of health events - reported on the service based on the health state. - Use PartitionsHealthStateFilter to filter the collection of partitions - returned. 
- If you specify a service that does not exist in the health store, this - request returns an error. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param partitions_health_state_filter: Allows filtering of the - partitions health state objects returned in the result of service - health query based on their health state. 
- The possible values for this parameter include integer value of one of - the following health states. - Only partitions that match the filter are returned. All partitions are - used to evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - value - obtained using bitwise 'OR' operator. For example, if the provided - value is 6 then health state of partitions with HealthState value of - OK (2) and Warning (4) will be returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type partitions_health_state_filter: int - :param application_health_policy: Describes the health policies used - to evaluate the health of an application or one of its children. - If not present, the health evaluation uses the health policy from - application manifest or the default health policy. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param exclude_health_statistics: Indicates whether the health - statistics should be returned as part of the query result. False by - default. - The statistics show the number of children entities in health state - Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. 
The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ServiceHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ServiceHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_service_health_using_policy.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if partitions_health_state_filter is not None: - query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 
'ApplicationHealthPolicy') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} - - def report_service_health( - self, service_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric service. - - Reports health state of the specified Service Fabric service. The - report must contain the information about the source of the health - report and property on which it is reported. - The report is sent to a Service Fabric gateway Service, which forwards - to the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. - For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. - To see whether the report was applied in the health store, run - GetServiceHealth and check that the report appears in the HealthEvents - section. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. 
- :type service_id: str - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. - If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. - If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. - This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_service_health.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'} - - def resolve_service( - self, service_id, partition_key_type=None, partition_key_value=None, previous_rsp_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Resolve a Service Fabric partition. - - Resolve a Service Fabric service partition to get the endpoints of the - service replicas. - - :param service_id: The identity of the service. 
This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_key_type: Key type for the partition. This parameter - is required if the partition scheme for the service is Int64Range or - Named. The possible values are following. - - None (1) - Indicates that the PartitionKeyValue parameter is not - specified. This is valid for the partitions with partitioning scheme - as Singleton. This is the default value. The value is 1. - - Int64Range (2) - Indicates that the PartitionKeyValue parameter is - an int64 partition key. This is valid for the partitions with - partitioning scheme as Int64Range. The value is 2. - - Named (3) - Indicates that the PartitionKeyValue parameter is a name - of the partition. This is valid for the partitions with partitioning - scheme as Named. The value is 3. - :type partition_key_type: int - :param partition_key_value: Partition key. This is required if the - partition scheme for the service is Int64Range or Named. - This is not the partition ID, but rather, either the integer key - value, or the name of the partition ID. - For example, if your service is using ranged partitions from 0 to 10, - then they PartitionKeyValue would be an - integer in that range. Query service description to see the range or - name. - :type partition_key_value: str - :param previous_rsp_version: The value in the Version field of the - response that was received previously. This is required if the user - knows that the result that was gotten previously is stale. - :type previous_rsp_version: str - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ResolvedServicePartition or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ResolvedServicePartition or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.resolve_service.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_key_type is not None: - query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int') - if partition_key_value is not None: - query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True) - if previous_rsp_version is not None: - query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, 
stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ResolvedServicePartition', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'} - - def get_unplaced_replica_information( - self, service_id, partition_id=None, only_query_primaries=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about unplaced replica of the service. - - Returns the information about the unplaced replicas of the service. - If PartitionId is specified, then result will contain information only - about unplaced replicas for that partition. - If PartitionId is not specified, then result will contain information - about unplaced replicas for all partitions of that service. - If OnlyQueryPrimaries is set to true, then result will contain - information only about primary replicas, and will ignore unplaced - secondary replicas. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param only_query_primaries: Indicates that unplaced replica - information will be queries only for primary replicas. - :type only_query_primaries: bool - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: UnplacedReplicaInformation or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.UnplacedReplicaInformation or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_unplaced_replica_information.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_id is not None: - query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') - if only_query_primaries is not None: - query_parameters['OnlyQueryPrimaries'] = self._serialize.query("only_query_primaries", only_query_primaries, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 
def get_partition_info_list(
        self, service_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
    """List the partitions of a Service Fabric service.

    Each entry in the response carries the partition ID, partitioning
    scheme, supported keys, status, health, and related details.

    :param service_id: Identity of the service: the full service name
        without the 'fabric:' URI scheme, with hierarchical segments
        delimited by "~" from version 6.0 onward (e.g.
        "fabric:/myapp/app1/svc1" -> "myapp~app1~svc1").
    :type service_id: str
    :param continuation_token: Opaque token from a previous response used
        to fetch the next page of results. Must not be URL encoded.
    :type continuation_token: str
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: PagedServicePartitionInfoList or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList or
        ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.4"

    # Resolve the URL template with the (unquoted) service identity.
    url = self._client.format_url(
        self.get_partition_info_list.metadata['url'],
        serviceId=self._serialize.url("service_id", service_id, 'str', skip_quote=True))

    # Query string: api-version is mandatory, the rest are optional.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if continuation_token is not None:
        query['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET and surface service errors as FabricErrorException.
    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PagedServicePartitionInfoList', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_partition_info_list.metadata = {'url': '/Services/{serviceId}/$/GetPartitions'}
'/Services/{serviceId}/$/GetPartitions'} - - def get_partition_info( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about a Service Fabric partition. - - Gets the information about the specified partition. The response - includes the partition ID, partitioning scheme information, keys - supported by the partition, status, health, and other details about the - partition. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
def get_service_name_info(
        self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Get the name of the Service Fabric service that owns a partition.

    Returns the service name for the given partition; the service replies
    with a 404 error when the partition ID does not exist in the cluster.

    :param partition_id: The identity of the partition.
    :type partition_id: str
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: ServiceNameInfo or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.ServiceNameInfo or
        ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.0"

    url = self._client.format_url(
        self.get_service_name_info.metadata['url'],
        partitionId=self._serialize.url("partition_id", partition_id, 'str', skip_quote=True))

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ServiceNameInfo', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_service_name_info.metadata = {'url': '/Partitions/{partitionId}/$/GetServiceName'}
def get_partition_health(
        self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Get the health of a Service Fabric partition.

    EventsHealthStateFilter narrows the health events returned and
    ReplicasHealthStateFilter narrows the ReplicaHealthState entries; all
    events/replicas still count toward the aggregated health state.
    Requesting a partition unknown to the health store yields an error.

    :param partition_id: The identity of the partition.
    :type partition_id: str
    :param events_health_state_filter: Bitwise-OR combination of
        HealthStateFilter values selecting which HealthEvent objects to
        return (Default=0, None=1, Ok=2, Warning=4, Error=8, All=65535);
        e.g. 6 returns events with state Ok or Warning.
    :type events_health_state_filter: int
    :param replicas_health_state_filter: Bitwise-OR combination of
        HealthStateFilter values selecting which ReplicaHealthState
        objects to return; same value semantics as the events filter.
    :type replicas_health_state_filter: int
    :param exclude_health_statistics: When True, omit the Ok/Warning/Error
        child-entity counts from the result. False by default.
    :type exclude_health_statistics: bool
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: PartitionHealth or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.PartitionHealth or
        ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.0"

    url = self._client.format_url(
        self.get_partition_health.metadata['url'],
        partitionId=self._serialize.url("partition_id", partition_id, 'str', skip_quote=True))

    # All filters default to non-None values, so they are normally sent.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if events_health_state_filter is not None:
        query['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
    if replicas_health_state_filter is not None:
        query['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int')
    if exclude_health_statistics is not None:
        query['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PartitionHealth', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'}
def get_partition_health_using_policy(
        self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Get the health of a Service Fabric partition using a custom policy.

    When an application health policy is supplied in the POST body it
    overrides the policy used for the aggregated health evaluation;
    otherwise the policy from the application manifest (or the default
    policy) applies. EventsHealthStateFilter and
    ReplicasHealthStateFilter narrow the returned events/replicas.
    Requesting a partition unknown to the health store yields an error.

    :param partition_id: The identity of the partition.
    :type partition_id: str
    :param events_health_state_filter: Bitwise-OR combination of
        HealthStateFilter values selecting which HealthEvent objects to
        return (Default=0, None=1, Ok=2, Warning=4, Error=8, All=65535).
    :type events_health_state_filter: int
    :param replicas_health_state_filter: Bitwise-OR combination of
        HealthStateFilter values selecting which ReplicaHealthState
        objects to return; same value semantics as the events filter.
    :type replicas_health_state_filter: int
    :param application_health_policy: Optional health policy overriding
        the manifest/default policy for this evaluation.
    :type application_health_policy:
        ~azure.servicefabric.models.ApplicationHealthPolicy
    :param exclude_health_statistics: When True, omit the Ok/Warning/Error
        child-entity counts from the result. False by default.
    :type exclude_health_statistics: bool
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: PartitionHealth or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.PartitionHealth or
        ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.0"

    url = self._client.format_url(
        self.get_partition_health_using_policy.metadata['url'],
        partitionId=self._serialize.url("partition_id", partition_id, 'str', skip_quote=True))

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if events_health_state_filter is not None:
        query['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
    if replicas_health_state_filter is not None:
        query['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int')
    if exclude_health_statistics is not None:
        query['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if custom_headers:
        headers.update(custom_headers)

    # The policy body is optional; an absent policy sends an empty body.
    if application_health_policy is not None:
        body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
    else:
        body_content = None

    request = self._client.post(url, query, headers, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PartitionHealth', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'}
def report_partition_health(
        self, partition_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Send a health report for a Service Fabric partition.

    The report must identify its source and the reported property. It is
    delivered to a Service Fabric gateway, which forwards it to the
    health store; the store may still reject it after validation (for
    example, on a stale sequence number). Use GetPartitionHealth to
    confirm the report appears under HealthEvents.

    :param partition_id: The identity of the partition.
    :type partition_id: str
    :param health_information: Health information for the report; required
        on every report sent to the health manager.
    :type health_information: ~azure.servicefabric.models.HealthInformation
    :param immediate: When True the gateway forwards the report to the
        health store immediately, bypassing batching — useful for critical
        reports. When False (default) the report is batched per the
        HealthReportSendInterval configuration, which is the recommended,
        more efficient setting.
    :type immediate: bool
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.0"

    url = self._client.format_url(
        self.report_partition_health.metadata['url'],
        partitionId=self._serialize.url("partition_id", partition_id, 'str', skip_quote=True))

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if immediate is not None:
        query['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    # No Accept header: the operation returns no body on success.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    body_content = self._serialize.body(health_information, 'HealthInformation')

    request = self._client.post(url, query, headers, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'}
def get_partition_load_information(
        self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Get the load information of a Service Fabric partition.

    The response lists the partition's load reports; each carries the
    load metric name, its value, and the last reported time in UTC.

    :param partition_id: The identity of the partition.
    :type partition_id: str
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: PartitionLoadInformation or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.PartitionLoadInformation or
        ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.0"

    url = self._client.format_url(
        self.get_partition_load_information.metadata['url'],
        partitionId=self._serialize.url("partition_id", partition_id, 'str', skip_quote=True))

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PartitionLoadInformation', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'}
self._deserialize('PartitionLoadInformation', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} - - def reset_partition_load( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Resets the current load of a Service Fabric partition. - - Resets the current load of a Service Fabric partition to the default - load for the service. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
def recover_partition(
        self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Ask the cluster to recover a partition stuck in quorum loss.

    Only perform this operation when the down replicas are known to be
    unrecoverable: incorrect use of this API can cause potential data
    loss.

    :param partition_id: The identity of the partition.
    :type partition_id: str
    :param timeout: Server-side timeout, in seconds, for the operation.
    :type timeout: long
    :param dict custom_headers: headers merged into the request
    :param bool raw: return the raw response alongside the deserialized one
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`FabricErrorException`
    """
    api_version = "6.0"

    url = self._client.format_url(
        self.recover_partition.metadata['url'],
        partitionId=self._serialize.url("partition_id", partition_id, 'str', skip_quote=True))

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    # No default headers: the POST sends no body and expects none back.
    headers = {}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.post(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'}
- - Indicates to the Service Fabric cluster that it should attempt to - recover the specified service that is currently stuck in quorum loss. - This operation should only be performed if it is known that the - replicas that are down cannot be recovered. Incorrect use of this API - can cause potential data loss. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.recover_service_partitions.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} - - def recover_system_partitions( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Indicates to the Service Fabric cluster that it should attempt to - recover the system services that are currently stuck in quorum loss. - - Indicates to the Service Fabric cluster that it should attempt to - recover the system services that are currently stuck in quorum loss. - This operation should only be performed if it is known that the - replicas that are down cannot be recovered. Incorrect use of this API - can cause potential data loss. - - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.recover_system_partitions.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} - - def recover_all_partitions( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Indicates to the Service Fabric cluster that it should attempt to - recover any services (including system services) which are currently - stuck in quorum loss. - - This operation should only be performed if it is known that the - replicas that are down cannot be recovered. Incorrect use of this API - can cause potential data loss. 
- - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.recover_all_partitions.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} - - def move_primary_replica( - self, partition_id, node_name=None, ignore_constraints=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Moves the primary replica of a partition of a stateful service. - - This command moves the primary replica of a partition of a stateful - service, respecting all constraints. 
- If NodeName parameter is specified, primary will be moved to the - specified node (if constraints allow it). - If NodeName parameter is not specified, primary replica will be moved - to a random node in the cluster. - If IgnoreConstraints parameter is specified and set to true, then - primary will be moved regardless of the constraints. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param node_name: The name of the node. - :type node_name: str - :param ignore_constraints: Ignore constraints when moving a replica. - If this parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.5" - - # Construct URL - url = self.move_primary_replica.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if node_name is not None: - query_parameters['NodeName'] = self._serialize.query("node_name", node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - move_primary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MovePrimaryReplica'} - - def move_secondary_replica( - self, partition_id, current_node_name, new_node_name=None, ignore_constraints=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Moves the secondary replica of a partition of a stateful service. - - This command moves the secondary replica of a partition of a stateful - service, respecting all constraints. 
- CurrentNodeName parameter must be specified to identify the replica - that is moved. - Source node name must be specified, but new node name can be omitted, - and in that case replica is moved to a random node. - If IgnoreConstraints parameter is specified and set to true, then - secondary will be moved regardless of the constraints. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param current_node_name: The name of the source node for secondary - replica move. - :type current_node_name: str - :param new_node_name: The name of the target node for secondary - replica move. If not specified, replica is moved to a random node. - :type new_node_name: str - :param ignore_constraints: Ignore constraints when moving a replica. - If this parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.5" - - # Construct URL - url = self.move_secondary_replica.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') - if new_node_name is not None: - query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - move_secondary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MoveSecondaryReplica'} - - def update_partition_load( - self, partition_metric_load_description_list, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Update the loads of provided partitions for specific metrics. 
- - Updates the load value and predicted load value for all the partitions - provided for specified metrics. - - :param partition_metric_load_description_list: Description of updating - load for list of partitions. - :type partition_metric_load_description_list: - list[~azure.servicefabric.models.PartitionMetricLoadDescription] - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedUpdatePartitionLoadResultList or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.PagedUpdatePartitionLoadResultList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "7.2" - - # Construct URL - url = self.update_partition_load.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(partition_metric_load_description_list, '[PartitionMetricLoadDescription]') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedUpdatePartitionLoadResultList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - update_partition_load.metadata = {'url': '/$/UpdatePartitionLoad'} - - def create_repair_task( - self, repair_task, 
custom_headers=None, raw=False, **operation_config): - """Creates a new repair task. - - For clusters that have the Repair Manager Service configured, - this API provides a way to create repair tasks that run automatically - or manually. - For repair tasks that run automatically, an appropriate repair executor - must be running for each repair action to run automatically. - These are currently only available in specially-configured Azure Cloud - Services. - To create a manual repair task, provide the set of impacted node names - and the - expected impact. When the state of the created repair task changes to - approved, - you can safely perform repair actions on those nodes. - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param repair_task: Describes the repair task to be created or - updated. - :type repair_task: ~azure.servicefabric.models.RepairTask - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.create_repair_task.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(repair_task, 'RepairTask') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('RepairTaskUpdateInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - create_repair_task.metadata = {'url': '/$/CreateRepairTask'} - - def cancel_repair_task( - self, repair_task_cancel_description, custom_headers=None, raw=False, **operation_config): - """Requests the cancellation of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param repair_task_cancel_description: Describes the repair task to be - cancelled. 
- :type repair_task_cancel_description: - ~azure.servicefabric.models.RepairTaskCancelDescription - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.cancel_repair_task.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('RepairTaskUpdateInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'} - - def delete_repair_task( - self, task_id, version=None, custom_headers=None, raw=False, **operation_config): - """Deletes a completed repair task. 
- - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param task_id: The ID of the completed repair task to be deleted. - :type task_id: str - :param version: The current version number of the repair task. If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version - check is performed. - :type version: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - repair_task_delete_description = models.RepairTaskDeleteDescription(task_id=task_id, version=version) - - api_version = "6.0" - - # Construct URL - url = self.delete_repair_task.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(repair_task_delete_description, 'RepairTaskDeleteDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'} - - def get_repair_task_list( - self, task_id_filter=None, 
state_filter=None, executor_filter=None, custom_headers=None, raw=False, **operation_config): - """Gets a list of repair tasks matching the given filters. - - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param task_id_filter: The repair task ID prefix to be matched. - :type task_id_filter: str - :param state_filter: A bitwise-OR of the following values, specifying - which task states should be included in the result list. - - 1 - Created - - 2 - Claimed - - 4 - Preparing - - 8 - Approved - - 16 - Executing - - 32 - Restoring - - 64 - Completed - :type state_filter: int - :param executor_filter: The name of the repair executor whose claimed - tasks should be included in the list. - :type executor_filter: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.RepairTask] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_repair_task_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if task_id_filter is not None: - query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str') - if state_filter is not None: - query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') - if executor_filter is not None: - query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[RepairTask]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'} - - def force_approve_repair_task( - self, task_id, version=None, custom_headers=None, raw=False, **operation_config): - """Forces the approval of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param task_id: The ID of the repair task. - :type task_id: str - :param version: The current version number of the repair task. 
If - non-zero, then the request will only succeed if this value matches the - actual current version of the repair task. If zero, then no version - check is performed. - :type version: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - repair_task_approve_description = models.RepairTaskApproveDescription(task_id=task_id, version=version) - - api_version = "6.0" - - # Construct URL - url = self.force_approve_repair_task.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(repair_task_approve_description, 'RepairTaskApproveDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('RepairTaskUpdateInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'} - - def 
update_repair_task_health_policy( - self, repair_task_update_health_policy_description, custom_headers=None, raw=False, **operation_config): - """Updates the health policy of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param repair_task_update_health_policy_description: Describes the - repair task healthy policy to be updated. - :type repair_task_update_health_policy_description: - ~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.update_repair_task_health_policy.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('RepairTaskUpdateInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'} - - def update_repair_execution_state( - self, repair_task, custom_headers=None, raw=False, **operation_config): - """Updates the execution state of a repair task. - - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param repair_task: Describes the repair task to be created or - updated. - :type repair_task: ~azure.servicefabric.models.RepairTask - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.update_repair_execution_state.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(repair_task, 'RepairTask') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - 
deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('RepairTaskUpdateInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'} - - def get_replica_info_list( - self, partition_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about replicas of a Service Fabric service - partition. - - The GetReplicas endpoint returns information about the replicas of the - specified partition. The response includes the ID, role, status, - health, node name, uptime, and other details about the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedReplicaInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedReplicaInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_replica_info_list.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedReplicaInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'} - - def get_replica_info( - self, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about a replica of a Service Fabric partition. 
- - The response includes the ID, role, status, health, node name, uptime, - and other details about the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ReplicaInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ReplicaInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_replica_info.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 
204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ReplicaInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'} - - def get_replica_health( - self, partition_id, replica_id, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric stateful service replica or - stateless service instance. - - Gets the health of a Service Fabric replica. - Use EventsHealthStateFilter to filter the collection of health events - reported on the replica based on the health state. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. 
- - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ReplicaHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ReplicaHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_replica_health.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # 
Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ReplicaHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} - - def get_replica_health_using_policy( - self, partition_id, replica_id, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the health of a Service Fabric stateful service replica or - stateless service instance using the specified policy. - - Gets the health of a Service Fabric stateful service replica or - stateless service instance. - Use EventsHealthStateFilter to filter the collection of health events - reported on the cluster based on the health state. - Use ApplicationHealthPolicy to optionally override the health policies - used to evaluate the health. This API only uses - 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest - of the fields are ignored while evaluating the health of the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. 
- If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param application_health_policy: Describes the health policies used - to evaluate the health of an application or one of its children. - If not present, the health evaluation uses the health policy from - application manifest or the default health policy. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ReplicaHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ReplicaHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_replica_health_using_policy.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ReplicaHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return 
deserialized - get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} - - def report_replica_health( - self, partition_id, replica_id, health_information, service_kind="Stateful", immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric replica. - - Reports health state of the specified Service Fabric replica. The - report must contain the information about the source of the health - report and property on which it is reported. - The report is sent to a Service Fabric gateway Replica, which forwards - to the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. - For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. - To see whether the report was applied in the health store, run - GetReplicaHealth and check that the report appears in the HealthEvents - section. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param service_kind: The kind of service replica (Stateless or - Stateful) for which the health is being reported. Following are the - possible values. Possible values include: 'Stateless', 'Stateful' - :type service_kind: str or - ~azure.servicefabric.models.ReplicaHealthReportServiceKind - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. 
- If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. - If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. - This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_replica_health.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} - - def get_deployed_service_replica_info_list( - self, node_name, application_id, partition_id=None, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Gets the list of replicas deployed on a Service Fabric node. - - Gets the list containing the information about replicas deployed on a - Service Fabric node. The information include partition ID, replica ID, - status of the replica, name of the service, name of the service type, - and other information. Use PartitionId or ServiceManifestName query - parameters to return information about the deployed replicas matching - the specified values for those parameters. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param service_manifest_name: The name of a service manifest - registered as part of an application type in a Service Fabric cluster. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo] - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_replica_info_list.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_id is not None: - query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceReplicaInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} - - 
def get_deployed_service_replica_detail_info( - self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the details of replica deployed on a Service Fabric node. - - Gets the details of the replica deployed on a Service Fabric node. The - information includes service kind, service name, current service - operation, current service operation start date time, partition ID, - replica/instance ID, reported load, and other information. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: DeployedServiceReplicaDetailInfo or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_replica_detail_info.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} - - def get_deployed_service_replica_detail_info_by_partition_id( - self, node_name, partition_id, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Gets the details of replica deployed on a Service Fabric node. - - Gets the details of the replica deployed on a Service Fabric node. The - information includes service kind, service name, current service - operation, current service operation start date time, partition ID, - replica/instance ID, reported load, and other information. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: DeployedServiceReplicaDetailInfo or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'} - - def restart_replica( - self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Restarts a service replica of a persisted service running on a node. 
- - Restarts a service replica of a persisted service running on a node. - Warning - There are no safety checks performed when this API is used. - Incorrect use of this API can lead to availability loss for stateful - services. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.restart_replica.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, 
query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - restart_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} - - def remove_replica( - self, node_name, partition_id, replica_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Removes a service replica running on a node. - - This API simulates a Service Fabric replica failure by removing a - replica from a Service Fabric cluster. The removal closes the replica, - transitions the replica to the role None, and then removes all of the - state information of the replica from the cluster. This API tests the - replica state removal path, and simulates the report fault permanent - path through client APIs. Warning - There are no safety checks - performed when this API is used. Incorrect use of this API can lead to - data loss for stateful services. In addition, the forceRemove flag - impacts all other replicas hosted in the same process. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param force_remove: Remove a Service Fabric application or service - forcefully without going through the graceful shutdown sequence. This - parameter can be used to forcefully delete an application or service - for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.remove_replica.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - remove_replica.metadata = {'url': 
'/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} - - def get_deployed_service_package_info_list( - self, node_name, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of service packages deployed on a Service Fabric node. - - Returns the information about the service packages deployed on a - Service Fabric node for the given application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_package_info_list.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServicePackageInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} - - def get_deployed_service_package_info_list_by_name( - self, node_name, application_id, service_package_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of service packages deployed on a Service Fabric node - matching exactly the specified name. 
- - Returns the information about the service packages deployed on a - Service Fabric node for the given application. These results are of - service packages whose name match exactly the service package name - specified as the parameter. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_package_info_list_by_name.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServicePackageInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'} - - def get_deployed_service_package_health( - self, node_name, application_id, service_package_name, events_health_state_filter=0, 
timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about health of a service package for a specific - application deployed for a Service Fabric node and application. - - Gets the information about health of a service package for a specific - application deployed on a Service Fabric node. Use - EventsHealthStateFilter to optionally filter for the collection of - HealthEvent objects reported on the deployed service package based on - health state. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. 
- - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: DeployedServicePackageHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_package_health.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedServicePackageHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} - - def get_deployed_service_package_health_using_policy( - self, node_name, application_id, service_package_name, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about health of service package for a specific - application deployed on a Service Fabric node using the specified - policy. - - Gets the information about health of a service package for a specific - application deployed on a Service Fabric node. using the specified - policy. Use EventsHealthStateFilter to optionally filter for the - collection of HealthEvent objects reported on the deployed service - package based on health state. Use ApplicationHealthPolicy to - optionally override the health policies used to evaluate the health. - This API only uses 'ConsiderWarningAsError' field of the - ApplicationHealthPolicy. The rest of the fields are ignored while - evaluating the health of the deployed service package. - - :param node_name: The name of the node. 
- :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param events_health_state_filter: Allows filtering the collection of - HealthEvent objects returned based on health state. - The possible values for this parameter include integer value of one of - the following health states. - Only events that match the filter are returned. All events are used to - evaluate the aggregated health state. - If not specified, all entries are returned. The state values are - flag-based enumeration, so the value could be a combination of these - values, obtained using the bitwise 'OR' operator. For example, If the - provided value is 6 then all of the events with HealthState value of - OK (2) and Warning (4) are returned. - - Default - Default value. Matches any HealthState. The value is zero. - - None - Filter that doesn't match any HealthState value. Used in - order to return no results on a given collection of states. The value - is 1. - - Ok - Filter that matches input with HealthState value Ok. The value - is 2. - - Warning - Filter that matches input with HealthState value Warning. - The value is 4. - - Error - Filter that matches input with HealthState value Error. The - value is 8. - - All - Filter that matches input with any HealthState value. The - value is 65535. - :type events_health_state_filter: int - :param application_health_policy: Describes the health policies used - to evaluate the health of an application or one of its children. 
- If not present, the health evaluation uses the health policy from - application manifest or the default health policy. - :type application_health_policy: - ~azure.servicefabric.models.ApplicationHealthPolicy - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: DeployedServicePackageHealth or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_service_package_health_using_policy.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - 
header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedServicePackageHealth', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} - - def report_deployed_service_package_health( - self, node_name, application_id, service_package_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Sends a health report on the Service Fabric deployed service package. - - Reports health state of the service package of the application deployed - on a Service Fabric node. The report must contain the information about - the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway Service, which forwards - to the health store. - The report may be accepted by the gateway, but rejected by the health - store after extra validation. - For example, the health store may reject the report because of an - invalid parameter, like a stale sequence number. 
- To see whether the report was applied in the health store, get deployed - service package health and check that the report appears in the - HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param health_information: Describes the health information for the - health report. This information needs to be present in all of the - health reports sent to the health manager. - :type health_information: - ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be - sent immediately. - A health report is sent to a Service Fabric gateway Application, which - forwards to the health store. - If Immediate is set to true, the report is sent immediately from HTTP - Gateway to the health store, regardless of the fabric client settings - that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as - possible. - Depending on timing and other conditions, sending the report may still - fail, for example if the HTTP Gateway is closed or the message doesn't - reach the Gateway. - If Immediate is set to false, the report is sent based on the health - client settings from the HTTP Gateway. Therefore, it will be batched - according to the HealthReportSendInterval configuration. 
- This is the recommended setting because it allows the health client to - optimize health reporting messages to health store as well as health - report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.report_deployed_service_package_health.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = 
self._serialize.body(health_information, 'HealthInformation') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} - - def deploy_service_package_to_node( - self, node_name, deploy_service_package_to_node_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Downloads all of the code packages associated with specified service - manifest on the specified node. - - This API provides a way to download code packages including the - container images on a specific node outside of the normal application - deployment and upgrade path. This is useful for the large code packages - and container images to be present on the node before the actual - application deployment and upgrade, thus significantly reducing the - total time required for the deployment or upgrade. - - :param node_name: The name of the node. - :type node_name: str - :param deploy_service_package_to_node_description: Describes - information for deploying a service package to a Service Fabric node. - :type deploy_service_package_to_node_description: - ~azure.servicefabric.models.DeployServicePackageToNodeDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.deploy_service_package_to_node.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} - - def get_deployed_code_package_info_list( - self, node_name, application_id, service_manifest_name=None, code_package_name=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of code 
packages deployed on a Service Fabric node. - - Gets the list of code packages deployed on a Service Fabric node for - the given application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest - registered as part of an application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in - service manifest registered as part of an application type in a - Service Fabric cluster. - :type code_package_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_deployed_code_package_info_list.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if code_package_name is not None: - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedCodePackageInfo]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_deployed_code_package_info_list.metadata = {'url': 
'/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} - - def restart_deployed_code_package( - self, node_name, application_id, restart_deployed_code_package_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Restarts a code package deployed on a Service Fabric node in a cluster. - - Restarts a code package deployed on a Service Fabric node in a cluster. - This aborts the code package process, which will restart all the user - service replicas hosted in that process. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param restart_deployed_code_package_description: Describes the - deployed code package on Service Fabric node to restart. - :type restart_deployed_code_package_description: - ~azure.servicefabric.models.RestartDeployedCodePackageDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.restart_deployed_code_package.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} - - def get_container_logs_deployed_on_node( - self, node_name, application_id, service_manifest_name, code_package_name, tail=None, previous=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the container logs for container deployed on a Service Fabric - node. 
- - Gets the container logs for container deployed on a Service Fabric node - for the given code package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest - registered as part of an application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in - service manifest registered as part of an application type in a - Service Fabric cluster. - :type code_package_name: str - :param tail: Number of lines to show from the end of the logs. Default - is 100. 'all' to show the complete logs. - :type tail: str - :param previous: Specifies whether to get container logs from - exited/dead containers of the code package instance. - :type previous: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ContainerLogs or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ContainerLogs or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.2" - - # Construct URL - url = self.get_container_logs_deployed_on_node.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - if tail is not None: - query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') - if previous is not None: - query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ContainerLogs', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - 
get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} - - def invoke_container_api( - self, node_name, application_id, service_manifest_name, code_package_name, code_package_instance_id, container_api_request_body, timeout=60, custom_headers=None, raw=False, **operation_config): - """Invoke container API on a container deployed on a Service Fabric node. - - Invoke container API on a container deployed on a Service Fabric node - for the given code package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest - registered as part of an application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in - service manifest registered as part of an application type in a - Service Fabric cluster. - :type code_package_name: str - :param code_package_instance_id: ID that uniquely identifies a code - package instance deployed on a service fabric node. - :type code_package_instance_id: str - :param container_api_request_body: Parameters for making container API - call - :type container_api_request_body: - ~azure.servicefabric.models.ContainerApiRequestBody - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ContainerApiResponse or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ContainerApiResponse or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.2" - - # Construct URL - url = self.invoke_container_api.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - 
- if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ContainerApiResponse', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} - - def create_compose_deployment( - self, create_compose_deployment_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates a Service Fabric compose deployment. - - Compose is a file format that describes multi-container applications. - This API allows deploying container based applications defined in - compose format in a Service Fabric cluster. Once the deployment is - created, its status can be tracked via the `GetComposeDeploymentStatus` - API. - - :param create_compose_deployment_description: Describes the compose - deployment that needs to be created. - :type create_compose_deployment_description: - ~azure.servicefabric.models.CreateComposeDeploymentDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0-preview" - - # Construct URL - url = self.create_compose_deployment.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} - - def get_compose_deployment_status( - self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets information about a Service Fabric compose deployment. - - Returns the status of the compose deployment that was created or in the - process of being created in the Service Fabric cluster and whose name - matches the one specified as the parameter. The response includes the - name, status, and other details about the deployment. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in - seconds. 
This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ComposeDeploymentStatusInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0-preview" - - # Construct URL - url = self.get_compose_deployment_status.metadata['url'] - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ComposeDeploymentStatusInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_compose_deployment_status.metadata = 
{'url': '/ComposeDeployments/{deploymentName}'} - - def get_compose_deployment_status_list( - self, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of compose deployments created in the Service Fabric - cluster. - - Gets the status about the compose deployments that were created or in - the process of being created in the Service Fabric cluster. The - response includes the name, status, and other details about the compose - deployments. If the list of deployments do not fit in a page, one page - of results is returned as well as a continuation token, which can be - used to get the next page. - - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedComposeDeploymentStatusInfoList or ClientRawResponse if - raw=true - :rtype: - ~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0-preview" - - # Construct URL - url = self.get_compose_deployment_status_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} 
- - def get_compose_deployment_upgrade_progress( - self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets details for the latest upgrade performed on this Service Fabric - compose deployment. - - Returns the information about the state of the compose deployment - upgrade along with details to aid debugging application health issues. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: ComposeDeploymentUpgradeProgressInfo or ClientRawResponse if - raw=true - :rtype: - ~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0-preview" - - # Construct URL - url = self.get_compose_deployment_upgrade_progress.metadata['url'] - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and 
send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} - - def remove_compose_deployment( - self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes an existing Service Fabric compose deployment from cluster. - - Deletes an existing Service Fabric compose deployment. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0-preview" - - # Construct URL - url = self.remove_compose_deployment.metadata['url'] - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} - - def start_compose_deployment_upgrade( - self, deployment_name, compose_deployment_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Starts upgrading a compose deployment in the Service Fabric cluster. - - Validates the supplied upgrade parameters and starts upgrading the - deployment if the parameters are valid. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param compose_deployment_upgrade_description: Parameters for - upgrading compose deployment. 
- :type compose_deployment_upgrade_description: - ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0-preview" - - # Construct URL - url = self.start_compose_deployment_upgrade.metadata['url'] - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - 
client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} - - def start_rollback_compose_deployment_upgrade( - self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Starts rolling back a compose deployment upgrade in the Service Fabric - cluster. - - Rollback a service fabric compose deployment upgrade. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
    def get_chaos(
            self, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Get the status of Chaos.

        Get the status of Chaos indicating whether or not Chaos is running,
        the Chaos parameters used for running Chaos and the status of the
        Chaos Schedule.

        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Chaos or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.Chaos or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.2"

        # Construct URL
        url = self.get_chaos.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Chaos', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_chaos.metadata = {'url': '/Tools/Chaos'}
    def start_chaos(
            self, chaos_parameters, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Starts Chaos in the cluster.

        If Chaos is not already running in the cluster, it starts Chaos with
        the passed in Chaos parameters.
        If Chaos is already running when this call is made, the call fails
        with the error code FABRIC_E_CHAOS_ALREADY_RUNNING.
        Refer to the article [Induce controlled Chaos in Service Fabric
        clusters](https://docs.microsoft.com/azure/service-fabric/service-fabric-controlled-chaos)
        for more details.

        :param chaos_parameters: Describes all the parameters to configure a
         Chaos run.
        :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.start_chaos.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(chaos_parameters, 'ChaosParameters')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'}
    def stop_chaos(
            self, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Stops Chaos if it is running in the cluster and put the Chaos
        Schedule in a stopped state.

        Stops Chaos from executing new faults. In-flight faults will continue
        to execute until they are complete. The current Chaos Schedule is put
        into a stopped state.
        Once a schedule is stopped, it will stay in the stopped state and not
        be used to Chaos Schedule new runs of Chaos. A new Chaos Schedule
        must be set in order to resume scheduling.

        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.stop_chaos.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers (no body, so no Content-Type is set)
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'}
    def get_chaos_events(
            self, continuation_token=None, start_time_utc=None, end_time_utc=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the next segment of the Chaos events based on the continuation
        token or the time range.

        To get the next segment of the Chaos events, you can specify the
        ContinuationToken. To get the start of a new segment of Chaos events,
        you can specify the time range through StartTimeUtc and EndTimeUtc.
        You cannot specify both the ContinuationToken and the time range in
        the same call. When there are more than 100 Chaos events, the events
        are returned in segments of no more than 100; pass the returned
        continuation token to this API to get the next segment.

        :param continuation_token: The continuation token parameter is used
         to obtain next set of results. The value of this parameter should
         not be URL encoded by the caller; it is forwarded as-is.
        :type continuation_token: str
        :param start_time_utc: The Windows file time representing the start
         time of the time range for which a Chaos report is to be generated.
        :type start_time_utc: str
        :param end_time_utc: The Windows file time representing the end time
         of the time range for which a Chaos report is to be generated.
        :type end_time_utc: str
        :param max_results: The maximum number of results to be returned as
         part of the paged queries. If this parameter is zero (the default),
         the paged query includes as many results as fit in the return
         message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ChaosEventsSegment or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.ChaosEventsSegment or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.2"

        # Construct URL
        url = self.get_chaos_events.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if continuation_token is not None:
            # skip_quote: the token is an opaque server value, sent unencoded.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
        if start_time_utc is not None:
            query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        if end_time_utc is not None:
            query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if max_results is not None:
            # NOTE: the default is 0 (not None), so MaxResults is always sent;
            # the service treats 0 as "no explicit limit".
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ChaosEventsSegment', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'}
    def get_chaos_schedule(
            self, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Get the Chaos Schedule defining when and how to run Chaos.

        Gets the version of the Chaos Schedule in use and the Chaos Schedule
        that defines when and how to run Chaos.

        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ChaosScheduleDescription or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.ChaosScheduleDescription or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.2"

        # Construct URL
        url = self.get_chaos_schedule.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ChaosScheduleDescription', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'}
    def post_chaos_schedule(
            self, timeout=60, version=None, schedule=None, custom_headers=None, raw=False, **operation_config):
        """Set the schedule used by Chaos.

        Chaos will automatically schedule runs based on the Chaos Schedule.
        The Chaos Schedule will be updated if the provided version matches
        the version on the server. When updating the Chaos Schedule, the
        version on the server is incremented by 1 (wrapping back to 0 after
        reaching a large number). If Chaos is running when this call is made,
        the call will fail.

        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param version: The version number of the Schedule.
        :type version: int
        :param schedule: Defines the schedule used by Chaos.
        :type schedule: ~azure.servicefabric.models.ChaosSchedule
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        # The wire payload is a ChaosScheduleDescription; the flattened
        # version/schedule parameters are recombined into the model here.
        chaos_schedule = models.ChaosScheduleDescription(version=version, schedule=schedule)

        api_version = "6.2"

        # Construct URL
        url = self.post_chaos_schedule.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(chaos_schedule, 'ChaosScheduleDescription')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'}
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - chaos_schedule = models.ChaosScheduleDescription(version=version, schedule=schedule) - - api_version = "6.2" - - # Construct URL - url = self.post_chaos_schedule.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(chaos_schedule, 'ChaosScheduleDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} - - def upload_file( - self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): - """Uploads contents of the file to the image store. - - Uploads contents of the file to the image store. Use this API if the - file is small enough to upload again if the connection fails. The - file's data needs to be added to the request body. The contents will be - uploaded to the specified path. Image store service uses a mark file to - indicate the availability of the folder. The mark file is an empty file - named "_.dir". 
The mark file is generated by the image store service - when all files in a folder are uploaded. When using File-by-File - approach to upload application package in REST, the image store service - isn't aware of the file hierarchy of the application package; you need - to create a mark file per folder and upload it last, to let the image - store service know that the folder is complete. - - :param content_path: Relative path to file or folder in the image - store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
    def get_image_store_content(
            self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the image store content information.

        Returns the information about the image store content at the
        specified contentPath. The contentPath is relative to the root of
        the image store.

        :param content_path: Relative path to file or folder in the image
         store from its root.
        :type content_path: str
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ImageStoreContent or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.ImageStoreContent or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.2"

        # Construct URL
        url = self.get_image_store_content.metadata['url']
        path_format_arguments = {
            'contentPath': self._serialize.url("content_path", content_path, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImageStoreContent', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'}
    def delete_image_store_content(
            self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Deletes existing image store content.

        Deletes existing image store content being found within the given
        image store relative path. This command can be used to delete
        uploaded application packages once they are provisioned.

        :param content_path: Relative path to file or folder in the image
         store from its root.
        :type content_path: str
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.delete_image_store_content.metadata['url']
        path_format_arguments = {
            'contentPath': self._serialize.url("content_path", content_path, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers (no body, so no Content-Type is set)
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'}
    def get_image_store_root_content(
            self, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the content information at the root of the image store.

        Returns the information about the image store content at the root of
        the image store.

        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ImageStoreContent or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.ImageStoreContent or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.get_image_store_root_content.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImageStoreContent', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_image_store_root_content.metadata = {'url': '/ImageStore'}
    def copy_image_store_content(
            self, image_store_copy_description, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Copies image store content internally.

        Copies the image store content from the source image store relative
        path to the destination image store relative path.

        :param image_store_copy_description: Describes the copy description
         for the image store.
        :type image_store_copy_description:
         ~azure.servicefabric.models.ImageStoreCopyDescription
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.copy_image_store_content.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'}
    def delete_image_store_upload_session(
            self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Cancels an image store upload session.

        The DELETE request will cause the existing upload session to expire
        and remove any previously uploaded file chunks.

        :param session_id: A GUID generated by the user for a file
         uploading. It identifies an image store upload session which keeps
         track of all file chunks until it is committed.
        :type session_id: str
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.delete_image_store_upload_session.metadata['url']

        # Construct parameters (session-id is required, sent unconditionally)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers (no body, so no Content-Type is set)
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'}
    def commit_image_store_upload_session(
            self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Commit an image store upload session.

        When all file chunks have been uploaded, the upload session needs to
        be committed explicitly to complete the upload. Image store preserves
        the upload session until the expiration time, which is 30 minutes
        after the last chunk received.

        :param session_id: A GUID generated by the user for a file
         uploading. It identifies an image store upload session which keeps
         track of all file chunks until it is committed.
        :type session_id: str
        :param timeout: The server timeout for performing the operation in
         seconds. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
        """
        api_version = "6.0"

        # Construct URL
        url = self.commit_image_store_upload_session.metadata['url']

        # Construct parameters (session-id is required, sent unconditionally)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers (no body, so no Content-Type is set)
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'}
- - :param session_id: A GUID generated by the user for a file uploading. - It identifies an image store upload session which keeps track of all - file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.commit_image_store_upload_session.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} - - def get_image_store_upload_session_by_id( - self, session_id, 
timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the image store upload session by ID. - - Gets the image store upload session identified by the given ID. User - can query the upload session at any time during uploading. . - - :param session_id: A GUID generated by the user for a file uploading. - It identifies an image store upload session which keeps track of all - file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: UploadSession or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.UploadSession or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_image_store_upload_session_by_id.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if 
response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UploadSession', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} - - def get_image_store_upload_session_by_path( - self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the image store upload session by relative path. - - Gets the image store upload session associated with the given image - store relative path. User can query the upload session at any time - during uploading. . - - :param content_path: Relative path to file or folder in the image - store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: UploadSession or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.UploadSession or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_image_store_upload_session_by_path.metadata['url'] - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UploadSession', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} - - def upload_file_chunk( - self, content_path, session_id, content_range, timeout=60, custom_headers=None, raw=False, **operation_config): - """Uploads a file chunk to the image store relative path. - - Uploads a file chunk to the image store with the specified upload - session ID and image store relative path. This API allows user to - resume the file upload operation. 
user doesn't have to restart the file - upload from scratch whenever there is a network interruption. Use this - option if the file size is large. - To perform a resumable file upload, user need to break the file into - multiple chunks and upload these chunks to the image store one-by-one. - Chunks don't have to be uploaded in order. If the file represented by - the image store relative path already exists, it will be overwritten - when the upload session commits. - - :param content_path: Relative path to file or folder in the image - store from its root. - :type content_path: str - :param session_id: A GUID generated by the user for a file uploading. - It identifies an image store upload session which keeps track of all - file chunks until it is committed. - :type session_id: str - :param content_range: When uploading file chunks to the image store, - the Content-Range header field need to be configured and sent with a - request. The format should looks like "bytes - {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For - example, Content-Range:bytes 300-5000/20000 indicates that user is - sending bytes 300 through 5,000 and the total file length is 20,000 - bytes. - :type content_range: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.upload_file_chunk.metadata['url'] - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - header_parameters['Content-Range'] = self._serialize.header("content_range", content_range, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} - - def get_image_store_root_folder_size( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the folder size at the root of the image store. - - Returns the total size of files at the root and children folders in - image store. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: FolderSizeInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.FolderSizeInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.5" - - # Construct URL - url = self.get_image_store_root_folder_size.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FolderSizeInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_image_store_root_folder_size.metadata = {'url': '/ImageStore/$/FolderSize'} - - def get_image_store_folder_size( - self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): - """Get the size of a folder in image store. - - Gets the total size of file under a image store folder, specified by - contentPath. The contentPath is relative to the root of the image - store. 
- - :param content_path: Relative path to file or folder in the image - store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: FolderSizeInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.FolderSizeInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.5" - - # Construct URL - url = self.get_image_store_folder_size.metadata['url'] - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FolderSizeInfo', response) - - if raw: - client_raw_response = 
ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_image_store_folder_size.metadata = {'url': '/ImageStore/{contentPath}/$/FolderSize'} - - def get_image_store_info( - self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the overall ImageStore information. - - Returns information about the primary ImageStore replica, such as disk - capacity and available disk space at the node it is on, and several - categories of the ImageStore's file system usage. - - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: ImageStoreInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ImageStoreInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.5" - - # Construct URL - url = self.get_image_store_info.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ImageStoreInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_image_store_info.metadata = {'url': '/ImageStore/$/Info'} - - def invoke_infrastructure_command( - self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Invokes an administrative command on the given Infrastructure Service - instance. - - For clusters that have one or more instances of the Infrastructure - Service configured, - this API provides a way to send infrastructure-specific commands to a - particular - instance of the Infrastructure Service. - Available commands and their corresponding response formats vary - depending upon - the infrastructure on which the cluster is running. 
- This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param command: The text of the command to be invoked. The content of - the command is infrastructure-specific. - :type command: str - :param service_id: The identity of the infrastructure service. This is - the full name of the infrastructure service without the 'fabric:' URI - scheme. This parameter required only for the cluster that has more - than one instance of infrastructure service running. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: str or ClientRawResponse if raw=true - :rtype: str or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.invoke_infrastructure_command.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Command'] = self._serialize.query("command", command, 'str') - if service_id is not None: - query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('str', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} - - def invoke_infrastructure_query( - self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Invokes a read-only query on the given infrastructure service instance. - - For clusters that have one or more instances of the Infrastructure - Service configured, - this API provides a way to send infrastructure-specific queries to a - particular - instance of the Infrastructure Service. 
- Available commands and their corresponding response formats vary - depending upon - the infrastructure on which the cluster is running. - This API supports the Service Fabric platform; it is not meant to be - used directly from your code. - - :param command: The text of the command to be invoked. The content of - the command is infrastructure-specific. - :type command: str - :param service_id: The identity of the infrastructure service. This is - the full name of the infrastructure service without the 'fabric:' URI - scheme. This parameter required only for the cluster that has more - than one instance of infrastructure service running. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: str or ClientRawResponse if raw=true - :rtype: str or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.invoke_infrastructure_query.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Command'] = self._serialize.query("command", command, 'str') - if service_id is not None: - query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('str', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} - - def start_data_loss( - self, service_id, partition_id, operation_id, data_loss_mode, timeout=60, custom_headers=None, raw=False, **operation_config): - """This API will induce data loss for the specified partition. It will - trigger a call to the OnDataLossAsync API of the partition. - - This API will induce data loss for the specified partition. It will - trigger a call to the OnDataLoss API of the partition. - Actual data loss will depend on the specified DataLossMode. 
- - PartialDataLoss - Only a quorum of replicas are removed and - OnDataLoss is triggered for the partition but actual data loss depends - on the presence of in-flight replication. - - FullDataLoss - All replicas are removed hence all data is lost and - OnDataLoss is triggered. - This API should only be called with a stateful service as the target. - Calling this API with a system service as the target is not advised. - Note: Once this API has been called, it cannot be reversed. Calling - CancelOperation will only stop execution and clean up internal system - state. - It will not restore data if the command has progressed far enough to - cause data loss. - Call the GetDataLossProgress API with the same OperationId to return - information on the operation started with this API. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param data_loss_mode: This enum is passed to the StartDataLoss API to - indicate what type of data loss to induce. Possible values include: - 'Invalid', 'PartialDataLoss', 'FullDataLoss' - :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_data_loss.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} - - def get_data_loss_progress( - self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): - 
"""Gets the progress of a partition data loss operation started using the - StartDataLoss API. - - Gets the progress of a data loss operation started with StartDataLoss, - using the OperationId. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PartitionDataLossProgress or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PartitionDataLossProgress or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_data_loss_progress.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PartitionDataLossProgress', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} - - def start_quorum_loss( - self, service_id, partition_id, operation_id, quorum_loss_mode, quorum_loss_duration, timeout=60, custom_headers=None, raw=False, **operation_config): - """Induces 
quorum loss for a given stateful service partition. - - This API is useful for a temporary quorum loss situation on your - service. - Call the GetQuorumLossProgress API with the same OperationId to return - information on the operation started with this API. - This can only be called on stateful persisted (HasPersistedState==true) - services. Do not use this API on stateless services or stateful - in-memory only services. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param quorum_loss_mode: This enum is passed to the StartQuorumLoss - API to indicate what type of quorum loss to induce. Possible values - include: 'Invalid', 'QuorumReplicas', 'AllReplicas' - :type quorum_loss_mode: str or - ~azure.servicefabric.models.QuorumLossMode - :param quorum_loss_duration: The amount of time for which the - partition will be kept in quorum loss. This must be specified in - seconds. - :type quorum_loss_duration: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_quorum_loss.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str') - query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} - - def 
get_quorum_loss_progress( - self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the progress of a quorum loss operation on a partition started - using the StartQuorumLoss API. - - Gets the progress of a quorum loss operation started with - StartQuorumLoss, using the provided OperationId. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PartitionQuorumLossProgress or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_quorum_loss_progress.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PartitionQuorumLossProgress', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} - - def start_partition_restart( - self, service_id, partition_id, operation_id, restart_partition_mode, timeout=60, custom_headers=None, raw=False, **operation_config): - """This 
API will restart some or all replicas or instances of the - specified partition. - - This API is useful for testing failover. - If used to target a stateless service partition, RestartPartitionMode - must be AllReplicasOrInstances. - Call the GetPartitionRestartProgress API using the same OperationId to - get the progress. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param restart_partition_mode: Describe which partitions to restart. - Possible values include: 'Invalid', 'AllReplicasOrInstances', - 'OnlyActiveSecondaries' - :type restart_partition_mode: str or - ~azure.servicefabric.models.RestartPartitionMode - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_partition_restart.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} - - def get_partition_restart_progress( - self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the progress of a PartitionRestart operation started using - StartPartitionRestart. - - Gets the progress of a PartitionRestart started with - StartPartitionRestart using the provided OperationId. 
- - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PartitionRestartProgress or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PartitionRestartProgress or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_partition_restart_progress.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PartitionRestartProgress', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} - - def start_node_transition( - self, node_name, operation_id, node_transition_type, node_instance_id, stop_duration_in_seconds, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Starts or stops a cluster node. - - Starts or stops a cluster node. A cluster node is a process, not the - OS instance itself. To start a node, pass in "Start" for the - NodeTransitionType parameter. - To stop a node, pass in "Stop" for the NodeTransitionType parameter. - This API starts the operation - when the API returns the node may not - have finished transitioning yet. - Call GetNodeTransitionProgress with the same OperationId to get the - progress of the operation. - - :param node_name: The name of the node. - :type node_name: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param node_transition_type: Indicates the type of transition to - perform. NodeTransitionType.Start will start a stopped node. - NodeTransitionType.Stop will stop a node that is up. Possible values - include: 'Invalid', 'Start', 'Stop' - :type node_transition_type: str or - ~azure.servicefabric.models.NodeTransitionType - :param node_instance_id: The node instance ID of the target node. - This can be determined through GetNodeInfo API. - :type node_instance_id: str - :param stop_duration_in_seconds: The duration, in seconds, to keep the - node stopped. The minimum value is 600, the maximum is 14400. After - this time expires, the node will automatically come back up. - :type stop_duration_in_seconds: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.start_node_transition.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str') - query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str') - query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} - - def get_node_transition_progress( - self, node_name, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the progress of an operation started using StartNodeTransition. 
- - Gets the progress of an operation started with StartNodeTransition - using the provided OperationId. - - :param node_name: The name of the node. - :type node_name: str - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: NodeTransitionProgress or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.NodeTransitionProgress or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_node_transition_progress.metadata['url'] - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, 
**operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeTransitionProgress', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} - - def get_fault_operation_list( - self, type_filter=65535, state_filter=65535, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets a list of user-induced fault operations filtered by provided - input. - - Gets the list of user-induced fault operations filtered by provided - input. - - :param type_filter: Used to filter on OperationType for user-induced - operations. - - 65535 - select all - - 1 - select PartitionDataLoss. - - 2 - select PartitionQuorumLoss. - - 4 - select PartitionRestart. - - 8 - select NodeTransition. - :type type_filter: int - :param state_filter: Used to filter on OperationState's for - user-induced operations. - - 65535 - select All - - 1 - select Running - - 2 - select RollingBack - - 8 - select Completed - - 16 - select Faulted - - 32 - select Cancelled - - 64 - select ForceCancelled - :type state_filter: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.OperationStatus] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_fault_operation_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int') - query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[OperationStatus]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_fault_operation_list.metadata = {'url': '/Faults/'} - - def cancel_operation( - self, operation_id, force=False, timeout=60, custom_headers=None, raw=False, **operation_config): - """Cancels a user-induced fault operation. - - The following APIs start fault operations that may be cancelled by - using CancelOperation: StartDataLoss, StartQuorumLoss, - StartPartitionRestart, StartNodeTransition. - If force is false, then the specified user-induced operation will be - gracefully stopped and cleaned up. 
If force is true, the command will - be aborted, and some internal state - may be left behind. Specifying force as true should be used with care. - Calling this API with force set to true is not allowed until this API - has already - been called on the same test command with force set to false first, or - unless the test command already has an OperationState of - OperationState.RollingBack. - Clarification: OperationState.RollingBack means that the system will - be/is cleaning up internal system state caused by executing the - command. It will not restore data if the - test command was to cause data loss. For example, if you call - StartDataLoss then call this API, the system will only clean up - internal state from running the command. - It will not restore the target partition's data, if the command - progressed far enough to cause data loss. - Important note: if this API is invoked with force==true, internal - state may be left behind. - - :param operation_id: A GUID that identifies a call of this API. This - is passed into the corresponding GetProgress API - :type operation_id: str - :param force: Indicates whether to gracefully roll back and clean up - internal system state modified by executing the user-induced - operation. - :type force: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.cancel_operation.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['Force'] = self._serialize.query("force", force, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - cancel_operation.metadata = {'url': '/Faults/$/Cancel'} - - def create_backup_policy( - self, backup_policy_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates a backup policy. - - Creates a backup policy which can be associated later with a Service - Fabric application, service or a partition for periodic backup. - - :param backup_policy_description: Describes the backup policy. - :type backup_policy_description: - ~azure.servicefabric.models.BackupPolicyDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.create_backup_policy.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} - - def delete_backup_policy( - self, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes the backup policy. - - Deletes an existing backup policy. A backup policy must be created - before it can be deleted. A currently active backup policy, associated - with any Service Fabric application, service or partition, cannot be - deleted without first deleting the mapping. 
- - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.delete_backup_policy.metadata['url'] - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} - - def get_backup_policy_list( - self, continuation_token=None, max_results=0, 
timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets all the backup policies configured. - - Get a list of all the backup policies configured. - - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedBackupPolicyDescriptionList or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_backup_policy_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedBackupPolicyDescriptionList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} - - def get_backup_policy_by_name( - self, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets a particular backup policy by name. - - Gets a particular backup policy identified by {backupPolicyName}. 
- - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: BackupPolicyDescription or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.BackupPolicyDescription or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_backup_policy_by_name.metadata['url'] - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BackupPolicyDescription', response) - - if raw: - client_raw_response = 
ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} - - def get_all_entities_backed_up_by_policy( - self, backup_policy_name, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the list of backup entities that are associated with this policy. - - Returns a list of Service Fabric application, service or partition - which are associated with this backup policy. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedBackupEntityList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedBackupEntityList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_all_entities_backed_up_by_policy.metadata['url'] - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedBackupEntityList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - 
return client_raw_response - - return deserialized - get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} - - def update_backup_policy( - self, backup_policy_description, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Updates the backup policy. - - Updates the backup policy identified by {backupPolicyName}. - - :param backup_policy_description: Describes the backup policy. - :type backup_policy_description: - ~azure.servicefabric.models.BackupPolicyDescription - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.update_backup_policy.metadata['url'] - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} - - def enable_application_backup( - self, application_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Enables periodic backup of stateful partitions under this Service - Fabric application. - - Enables periodic backup of stateful partitions which are part of this - Service Fabric application. Each partition is backed up individually as - per the specified backup policy description. 
- Note only C# based Reliable Actor and Reliable Stateful services are - currently supported for periodic backup. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param backup_policy_name: Name of the backup policy to be used for - enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) - - api_version = "6.4" - - # Construct URL - url = self.enable_application_backup.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} - - def disable_application_backup( - self, application_id, clean_backup, timeout=60, custom_headers=None, raw=False, **operation_config): - """Disables periodic backup of Service Fabric application. - - Disables periodic backup of Service Fabric application which was - previously enabled. - - :param application_id: The identity of the application. 
This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to - true for deleting all the backups which were created for the backup - entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - disable_backup_description = None - if clean_backup is not None: - disable_backup_description = models.DisableBackupDescription(clean_backup=clean_backup) - - api_version = "6.4" - - # Construct URL - url = self.disable_application_backup.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if disable_backup_description is not None: - body_content = self._serialize.body(disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} - - def get_application_backup_configuration_info( - self, application_id, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the Service Fabric application backup configuration 
information. - - Gets the Service Fabric backup configuration information for the - application and the services and partitions under this application. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedBackupConfigurationInfoList or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_application_backup_configuration_info.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedBackupConfigurationInfoList', response) - - if raw: - client_raw_response 
= ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} - - def get_application_backup_list( - self, application_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): - """Gets the list of backups available for every partition in this - application. - - Returns a list of backups available for every partition in this Service - Fabric application. The server enumerates all the backups available at - the backup location configured in the backup policy. It also allows - filtering of the result based on start and end datetime or just - fetching the latest available backup for every partition. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup - available for a partition for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which - to enumerate backups, in datetime format. The date time must be - specified in ISO8601 format. This is an optional parameter. 
If not - specified, all backups from the beginning are enumerated. - :type start_date_time_filter: datetime - :param end_date_time_filter: Specify the end date time till which to - enumerate backups, in datetime format. The date time must be specified - in ISO8601 format. This is an optional parameter. If not specified, - enumeration is done till the most recent backup. - :type end_date_time_filter: datetime - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedBackupInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedBackupInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_application_backup_list.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if 
response.status_code == 200: - deserialized = self._deserialize('PagedBackupInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} - - def suspend_application_backup( - self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Suspends periodic backup for the specified Service Fabric application. - - The application which is configured to take periodic backups, is - suspended for taking further backups till it is resumed again. This - operation applies to the entire application's hierarchy. It means all - the services and partitions under this application are now suspended - for backup. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.suspend_application_backup.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} - - def resume_application_backup( - self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Resumes periodic backup of a Service Fabric application which was - previously suspended. - - The previously suspended Service Fabric application resumes taking - periodic backup as per the backup policy currently configured for the - same. - - :param application_id: The identity of the application. This is - typically the full name of the application without the 'fabric:' URI - scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. 
- For example, if the application name is "fabric:/myapp/app1", the - application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in - previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.resume_application_backup.metadata['url'] - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - resume_application_backup.metadata = {'url': 
'/Applications/{applicationId}/$/ResumeBackup'} - - def enable_service_backup( - self, service_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Enables periodic backup of stateful partitions under this Service - Fabric service. - - Enables periodic backup of stateful partitions which are part of this - Service Fabric service. Each partition is backed up individually as per - the specified backup policy description. In case the application, which - the service is part of, is already enabled for backup then this - operation would override the policy being used to take the periodic - backup for this service and its partitions (unless explicitly - overridden at the partition level). - Note only C# based Reliable Actor and Reliable Stateful services are - currently supported for periodic backup. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param backup_policy_name: Name of the backup policy to be used for - enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) - - api_version = "6.4" - - # Construct URL - url = self.enable_service_backup.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} - - def disable_service_backup( - self, service_id, clean_backup, timeout=60, custom_headers=None, raw=False, **operation_config): - """Disables periodic backup of Service Fabric service which was previously - enabled. - - Disables periodic backup of Service Fabric service which was previously - enabled. Backup must be explicitly enabled before it can be disabled. 
- In case the backup is enabled for the Service Fabric application, which - this service is part of, this service would continue to be periodically - backed up as per the policy mapped at the application level. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to - true for deleting all the backups which were created for the backup - entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - disable_backup_description = None - if clean_backup is not None: - disable_backup_description = models.DisableBackupDescription(clean_backup=clean_backup) - - api_version = "6.4" - - # Construct URL - url = self.disable_service_backup.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if disable_backup_description is not None: - body_content = self._serialize.body(disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} - - def get_service_backup_configuration_info( - self, service_id, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the Service Fabric service backup configuration information. 
- - Gets the Service Fabric backup configuration information for the - service and the partitions under this service. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedBackupConfigurationInfoList or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_service_backup_configuration_info.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedBackupConfigurationInfoList', response) - - if raw: - client_raw_response = 
ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_backup_configuration_info.metadata = {'url': '/Services/{serviceId}/$/GetBackupConfigurationInfo'} - - def get_service_backup_list( - self, service_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): - """Gets the list of backups available for every partition in this service. - - Returns a list of backups available for every partition in this Service - Fabric service. The server enumerates all the backups available in the - backup store configured in the backup policy. It also allows filtering - of the result based on start and end datetime or just fetching the - latest available backup for every partition. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup - available for a partition for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which - to enumerate backups, in datetime format. The date time must be - specified in ISO8601 format. This is an optional parameter. If not - specified, all backups from the beginning are enumerated. 
- :type start_date_time_filter: datetime - :param end_date_time_filter: Specify the end date time till which to - enumerate backups, in datetime format. The date time must be specified - in ISO8601 format. This is an optional parameter. If not specified, - enumeration is done till the most recent backup. - :type end_date_time_filter: datetime - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedBackupInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedBackupInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_service_backup_list.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if 
response.status_code == 200: - deserialized = self._deserialize('PagedBackupInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'} - - def suspend_service_backup( - self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Suspends periodic backup for the specified Service Fabric service. - - The service which is configured to take periodic backups, is suspended - for taking further backups till it is resumed again. This operation - applies to the entire service's hierarchy. It means all the partitions - under this service are now suspended for backup. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.suspend_service_backup.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'} - - def resume_service_backup( - self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Resumes periodic backup of a Service Fabric service which was - previously suspended. - - The previously suspended Service Fabric service resumes taking periodic - backup as per the backup policy currently configured for the same. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. 
- For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.resume_service_backup.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'} - - def 
enable_partition_backup( - self, partition_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Enables periodic backup of the stateful persisted partition. - - Enables periodic backup of stateful persisted partition. Each partition - is backed up as per the specified backup policy description. In case - the application or service, which is partition is part of, is already - enabled for backup then this operation would override the policy being - used to take the periodic backup of this partition. - Note only C# based Reliable Actor and Reliable Stateful services are - currently supported for periodic backup. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param backup_policy_name: Name of the backup policy to be used for - enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) - - api_version = "6.4" - - # Construct URL - url = self.enable_partition_backup.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'} - - def disable_partition_backup( - self, partition_id, clean_backup, timeout=60, custom_headers=None, raw=False, **operation_config): - """Disables periodic backup of Service Fabric partition which was - previously enabled. - - Disables periodic backup of partition which was previously enabled. - Backup must be explicitly enabled before it can be disabled. 
- In case the backup is enabled for the Service Fabric application or - service, which this partition is part of, this partition would continue - to be periodically backed up as per the policy mapped at the higher - level entity. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to - true for deleting all the backups which were created for the backup - entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - disable_backup_description = None - if clean_backup is not None: - disable_backup_description = models.DisableBackupDescription(clean_backup=clean_backup) - - api_version = "6.4" - - # Construct URL - url = self.disable_partition_backup.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if disable_backup_description is not None: - body_content = self._serialize.body(disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - disable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/DisableBackup'} - - def get_partition_backup_configuration_info( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the partition backup configuration information. 
- - Gets the Service Fabric Backup configuration information for the - specified partition. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PartitionBackupConfigurationInfo or ClientRawResponse if - raw=true - :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_backup_configuration_info.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 
200: - deserialized = self._deserialize('PartitionBackupConfigurationInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'} - - def get_partition_backup_list( - self, partition_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, custom_headers=None, raw=False, **operation_config): - """Gets the list of backups available for the specified partition. - - Returns a list of backups available for the specified partition. The - server enumerates all the backups available in the backup store - configured in the backup policy. It also allows filtering of the result - based on start and end datetime or just fetching the latest available - backup for the partition. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup - available for a partition for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which - to enumerate backups, in datetime format. The date time must be - specified in ISO8601 format. This is an optional parameter. If not - specified, all backups from the beginning are enumerated. - :type start_date_time_filter: datetime - :param end_date_time_filter: Specify the end date time till which to - enumerate backups, in datetime format. The date time must be specified - in ISO8601 format. This is an optional parameter. If not specified, - enumeration is done till the most recent backup. 
- :type end_date_time_filter: datetime - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: PagedBackupInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedBackupInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_backup_list.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized 
= self._deserialize('PagedBackupInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'} - - def suspend_partition_backup( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Suspends periodic backup for the specified partition. - - The partition which is configured to take periodic backups, is - suspended for taking further backups till it is resumed again. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.suspend_partition_backup.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'} - - def resume_partition_backup( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Resumes periodic backup of partition which was previously suspended. - - The previously suspended partition resumes taking periodic backup as - per the backup policy currently configured for the same. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. 
- :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.resume_partition_backup.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'} - - def backup_partition( - self, partition_id, backup_timeout=10, timeout=60, backup_storage=None, custom_headers=None, raw=False, **operation_config): - """Triggers backup of the partition's state. - - Creates a backup of the stateful persisted partition's state. In case - the partition is already being periodically backed up, then by default - the new backup is created at the same backup storage. 
One can also - override the same by specifying the backup storage details as part of - the request body. Once the backup is initiated, its progress can be - tracked using the GetBackupProgress operation. - In case, the operation times out, specify a greater backup timeout - value in the query parameter. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param backup_timeout: Specifies the maximum amount of time, in - minutes, to wait for the backup operation to complete. Post that, the - operation completes with timeout error. However, in certain corner - cases it could be that though the operation returns back timeout, the - backup actually goes through. In case of timeout error, its - recommended to invoke this operation again with a greater timeout - value. The default value for the same is 10 minutes. - :type backup_timeout: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param backup_storage: Specifies the details of the backup storage - where to save the backup. - :type backup_storage: - ~azure.servicefabric.models.BackupStorageDescription - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - backup_partition_description = None - if backup_storage is not None: - backup_partition_description = models.BackupPartitionDescription(backup_storage=backup_storage) - - api_version = "6.4" - - # Construct URL - url = self.backup_partition.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if backup_timeout is not None: - query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - if backup_partition_description is not None: - body_content = self._serialize.body(backup_partition_description, 'BackupPartitionDescription') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'} - - def get_partition_backup_progress( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - 
"""Gets details for the latest backup triggered for this partition. - - Returns information about the state of the latest backup along with - details or failure reason in case of completion. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: BackupProgressInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.BackupProgressInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_backup_progress.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, 
response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BackupProgressInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_backup_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupProgress'} - - def restore_partition( - self, partition_id, restore_partition_description, restore_timeout=10, timeout=60, custom_headers=None, raw=False, **operation_config): - """Triggers restore of the state of the partition using the specified - restore partition description. - - Restores the state of a of the stateful persisted partition using the - specified backup point. In case the partition is already being - periodically backed up, then by default the backup point is looked for - in the storage specified in backup policy. One can also override the - same by specifying the backup storage details as part of the restore - partition description in body. Once the restore is initiated, its - progress can be tracked using the GetRestoreProgress operation. - In case, the operation times out, specify a greater restore timeout - value in the query parameter. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param restore_partition_description: Describes the parameters to - restore the partition. - :type restore_partition_description: - ~azure.servicefabric.models.RestorePartitionDescription - :param restore_timeout: Specifies the maximum amount of time to wait, - in minutes, for the restore operation to complete. Post that, the - operation returns back with timeout error. However, in certain corner - cases it could be that the restore operation goes through even though - it completes with timeout. In case of timeout error, its recommended - to invoke this operation again with a greater timeout value. the - default value for the same is 10 minutes. 
- :type restore_timeout: int - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.restore_partition.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if restore_timeout is not None: - query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - 
client_raw_response = ClientRawResponse(None, response) - return client_raw_response - restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'} - - def get_partition_restore_progress( - self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets details for the latest restore operation triggered for this - partition. - - Returns information about the state of the latest restore operation - along with details or failure reason in case of completion. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: RestoreProgressInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.RestoreProgressInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_restore_progress.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('RestoreProgressInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_restore_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetRestoreProgress'} - - def get_backups_from_backup_location( - self, get_backup_by_storage_query_description, timeout=60, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): - """Gets the list of backups available for the specified backed up entity - at the specified backup location. 
- - Gets the list of backups available for the specified backed up entity - (Application, Service or Partition) at the specified backup location - (FileShare or Azure Blob Storage). - - :param get_backup_by_storage_query_description: Describes the filters - and backup storage details to be used for enumerating backups. - :type get_backup_by_storage_query_description: - ~azure.servicefabric.models.GetBackupByStorageQueryDescription - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param max_results: The maximum number of results to be returned as - part of the paged queries. This parameter defines the upper bound on - the number of results returned. The results returned can be less than - the specified maximum results if they do not fit in the message as per - the max message size restrictions defined in the configuration. If - this parameter is zero or not specified, the paged query includes as - many results as possible that fit in the return message. - :type max_results: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedBackupInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedBackupInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_backups_from_backup_location.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedBackupInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'} - - def create_name( - self, name, timeout=60, custom_headers=None, raw=False, 
**operation_config): - """Creates a Service Fabric name. - - Creates the specified Service Fabric name. - - :param name: The Service Fabric name, including the 'fabric:' URI - scheme. - :type name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - name_description = models.NameDescription(name=name) - - api_version = "6.0" - - # Construct URL - url = self.create_name.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(name_description, 'NameDescription') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - create_name.metadata = {'url': 
'/Names/$/Create'} - - def get_name_exists_info( - self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Returns whether the Service Fabric name exists. - - Returns whether the specified Service Fabric name exists. - - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_name_exists_info.metadata['url'] - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = 
ClientRawResponse(None, response) - return client_raw_response - get_name_exists_info.metadata = {'url': '/Names/{nameId}'} - - def delete_name( - self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes a Service Fabric name. - - Deletes the specified Service Fabric name. A name must be created - before it can be deleted. Deleting a name with child properties will - fail. - - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.delete_name.metadata['url'] - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete_name.metadata = {'url': '/Names/{nameId}'} - - def get_sub_name_info_list( - self, name_id, recursive=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Enumerates all the Service Fabric names under a given name. - - Enumerates all the Service Fabric names under a given name. If the - subnames do not fit in a page, one page of results is returned as well - as a continuation token, which can be used to get the next page. - Querying a name that doesn't exist will fail. - - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str - :param recursive: Allows specifying that the search performed should - be recursive. 
- :type recursive: bool - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non-empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results, then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: PagedSubNameInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedSubNameInfoList or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_sub_name_info_list.metadata['url'] - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if recursive is not None: - query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PagedSubNameInfoList', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'} - - def get_property_info_list( - self, name_id, include_values=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets 
def get_property_info_list(self, name_id, include_values=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Gets information on all Service Fabric properties under a given name.

    A Service Fabric name can have one or more named properties that store
    custom information. This operation returns those properties as a paged
    list including name, value, and metadata for each property.

    :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
    :type name_id: str
    :param include_values: True to return property values with the metadata;
     False to return only the metadata.
    :type include_values: bool
    :param continuation_token: Continuation token from a previous call used
     to fetch the next page; must not be URL encoded.
    :type continuation_token: str
    :param timeout: The server timeout for performing the operation in
     seconds. Defaults to 60.
    :type timeout: long
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: PagedPropertyInfoList or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.PagedPropertyInfoList or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    api_version = "6.0"

    # Resolve the URL template; the name is deliberately left unquoted
    # (skip_quote) because hierarchical names contain '~' delimiters.
    url = self._client.format_url(
        self.get_property_info_list.metadata['url'],
        nameId=self._serialize.url("name_id", name_id, 'str', skip_quote=True))

    # Query string: api-version always, the rest only when supplied.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    if include_values is not None:
        query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool')
    if continuation_token is not None:
        query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    header_parameters = {'Accept': 'application/json'}
    if custom_headers:
        header_parameters.update(custom_headers)

    # Issue the GET and validate the status before deserializing.
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = self._deserialize('PagedPropertyInfoList', response) if response.status_code == 200 else None

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'}
def put_property(self, name_id, property_description, timeout=60, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a Service Fabric property.

    Creates or updates the specified Service Fabric property under a given
    name.

    :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
    :type name_id: str
    :param property_description: Describes the Service Fabric property to be
     created.
    :type property_description: ~azure.servicefabric.models.PropertyDescription
    :param timeout: The server timeout for performing the operation in
     seconds. Defaults to 60.
    :type timeout: long
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    api_version = "6.0"

    # Note: the service uses the same '.../GetProperty' path for PUT.
    url = self._client.format_url(
        self.put_property.metadata['url'],
        nameId=self._serialize.url("name_id", name_id, 'str', skip_quote=True))

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    # Body-bearing request: set Content-Type; no Accept header is needed
    # because a successful response has no payload.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        header_parameters.update(custom_headers)

    body_content = self._serialize.body(property_description, 'PropertyDescription')

    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'}
- :return: PropertyInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PropertyInfo or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.get_property_info.metadata['url'] - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PropertyInfo', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'} - - def delete_property( - self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes the specified Service Fabric property. - - Deletes the specified Service Fabric property under a given name. A - property must be created before it can be deleted. - - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. 
- :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.0" - - # Construct URL - url = self.delete_property.metadata['url'] - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} - - def 
def submit_property_batch(self, name_id, timeout=60, operations=None, custom_headers=None, raw=False, **operation_config):
    """Submits a property batch.

    Submits a batch of property operations. Either all or none of the
    operations will be committed.

    :param name_id: The Service Fabric name, without the 'fabric:' URI
     scheme.
    :type name_id: str
    :param timeout: The server timeout for performing the operation in
     seconds. This timeout specifies the time duration that the client is
     willing to wait for the requested operation to complete. The default
     value for this parameter is 60 seconds.
    :type timeout: long
    :param operations: A list of the property batch operations to be
     executed.
    :type operations:
     list[~azure.servicefabric.models.PropertyBatchOperation]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides`.
    :return: PropertyBatchInfo or ClientRawResponse if raw=true
    :rtype: ~azure.servicefabric.models.PropertyBatchInfo or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    # Wrap the flat operations list in the body model the service expects.
    property_batch_description_list = models.PropertyBatchDescriptionList(operations=operations)

    api_version = "6.0"

    # Construct URL
    url = self.submit_property_batch.metadata['url']
    path_format_arguments = {
        'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct body
    body_content = self._serialize.body(property_batch_description_list, 'PropertyBatchDescriptionList')

    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    # 409 is an expected outcome (the batch failed and was rolled back),
    # not an error: it carries a FailedPropertyBatchInfo payload.
    if response.status_code not in [200, 409]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        # Whole batch committed.
        deserialized = self._deserialize('SuccessfulPropertyBatchInfo', response)
    if response.status_code == 409:
        # Batch rejected; payload identifies the failing operation.
        deserialized = self._deserialize('FailedPropertyBatchInfo', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'}
def get_cluster_event_list(self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
    """Gets all Cluster-related events.

    The response is list of ClusterEvent objects.

    :param start_time_utc: The start time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type start_time_utc: str
    :param end_time_utc: The end time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type end_time_utc: str
    :param timeout: The server timeout for performing the operation in
     seconds. Defaults to 60.
    :type timeout: long
    :param events_types_filter: Comma separated list of FabricEvent types to
     include in the response.
    :type events_types_filter: str
    :param exclude_analysis_events: If true, AnalysisEvents are not
     retrieved.
    :type exclude_analysis_events: bool
    :param skip_correlation_lookup: If true, CorrelatedEvents are not
     searched; otherwise HasCorrelatedEvents is populated on every event.
    :type skip_correlation_lookup: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.servicefabric.models.ClusterEvent] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    api_version = "6.4"

    url = self.get_cluster_event_list.metadata['url']

    # Required time window plus optional filters; the serializer enforces
    # the valid timeout range (1..4294967295).
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
    query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
    query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
    if events_types_filter is not None:
        query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
    if exclude_analysis_events is not None:
        query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
    if skip_correlation_lookup is not None:
        query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

    header_parameters = {'Accept': 'application/json'}
    if custom_headers:
        header_parameters.update(custom_headers)

    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = self._deserialize('[ClusterEvent]', response) if response.status_code == 200 else None

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_cluster_event_list.metadata = {'url': '/EventsStore/Cluster/Events'}
def get_containers_event_list(self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
    """Gets all Containers-related events.

    The response is list of ContainerInstanceEvent objects.

    :param start_time_utc: The start time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type start_time_utc: str
    :param end_time_utc: The end time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type end_time_utc: str
    :param timeout: The server timeout for performing the operation in
     seconds. Defaults to 60.
    :type timeout: long
    :param events_types_filter: Comma separated list of FabricEvent types to
     include in the response.
    :type events_types_filter: str
    :param exclude_analysis_events: If true, AnalysisEvents are not
     retrieved.
    :type exclude_analysis_events: bool
    :param skip_correlation_lookup: If true, CorrelatedEvents are not
     searched; otherwise HasCorrelatedEvents is populated on every event.
    :type skip_correlation_lookup: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    # This endpoint is still in preview; note the preview api-version.
    api_version = "6.2-preview"

    url = self.get_containers_event_list.metadata['url']

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
    query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
    query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
    if events_types_filter is not None:
        query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
    if exclude_analysis_events is not None:
        query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
    if skip_correlation_lookup is not None:
        query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

    header_parameters = {'Accept': 'application/json'}
    if custom_headers:
        header_parameters.update(custom_headers)

    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = self._deserialize('[ContainerInstanceEvent]', response) if response.status_code == 200 else None

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'}
def get_node_event_list(self, node_name, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
    """Gets Node-related events for a specific node.

    The response is list of NodeEvent objects.

    :param node_name: The name of the node.
    :type node_name: str
    :param start_time_utc: The start time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type start_time_utc: str
    :param end_time_utc: The end time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type end_time_utc: str
    :param timeout: The server timeout for performing the operation in
     seconds. This timeout specifies the time duration that the client is
     willing to wait for the requested operation to complete. The default
     value for this parameter is 60 seconds.
    :type timeout: long
    :param events_types_filter: This is a comma separated string
     specifying the types of FabricEvents that should only be included in
     the response.
    :type events_types_filter: str
    :param exclude_analysis_events: This param disables the retrieval of
     AnalysisEvents if true is passed.
    :type exclude_analysis_events: bool
    :param skip_correlation_lookup: This param disables the search of
     CorrelatedEvents information if true is passed. otherwise the
     CorrelationEvents get processed and HasCorrelatedEvents field in every
     FabricEvent gets populated.
    :type skip_correlation_lookup: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.servicefabric.models.NodeEvent] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    api_version = "6.4"

    # Construct URL (node name is URL-quoted, unlike hierarchical name ids).
    url = self.get_node_event_list.metadata['url']
    path_format_arguments = {
        'nodeName': self._serialize.url("node_name", node_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters: the time window is required, filters optional.
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
    query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
    query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
    if events_types_filter is not None:
        query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
    if exclude_analysis_events is not None:
        query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
    if skip_correlation_lookup is not None:
        query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[NodeEvent]', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'}
def get_nodes_event_list(self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
    """Gets all Nodes-related Events.

    The response is list of NodeEvent objects.

    :param start_time_utc: The start time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type start_time_utc: str
    :param end_time_utc: The end time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type end_time_utc: str
    :param timeout: The server timeout for performing the operation in
     seconds. Defaults to 60.
    :type timeout: long
    :param events_types_filter: Comma separated list of FabricEvent types to
     include in the response.
    :type events_types_filter: str
    :param exclude_analysis_events: If true, AnalysisEvents are not
     retrieved.
    :type exclude_analysis_events: bool
    :param skip_correlation_lookup: If true, CorrelatedEvents are not
     searched; otherwise HasCorrelatedEvents is populated on every event.
    :type skip_correlation_lookup: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.servicefabric.models.NodeEvent] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    api_version = "6.4"

    # Cluster-wide endpoint: no path parameters to substitute.
    url = self.get_nodes_event_list.metadata['url']

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
    query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
    query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
    if events_types_filter is not None:
        query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
    if exclude_analysis_events is not None:
        query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
    if skip_correlation_lookup is not None:
        query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

    header_parameters = {'Accept': 'application/json'}
    if custom_headers:
        header_parameters.update(custom_headers)

    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = self._deserialize('[NodeEvent]', response) if response.status_code == 200 else None

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_nodes_event_list.metadata = {'url': '/EventsStore/Nodes/Events'}
def get_application_event_list(self, application_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
    """Gets Application-related events for a specific application.

    The response is list of ApplicationEvent objects.

    :param application_id: The identity of the application. This is
     typically the full name of the application without the 'fabric:' URI
     scheme. Starting from version 6.0, hierarchical names are delimited
     with the "~" character. For example, if the application name is
     "fabric:/myapp/app1", the application identity would be "myapp~app1"
     in 6.0+ and "myapp/app1" in previous versions.
    :type application_id: str
    :param start_time_utc: The start time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type start_time_utc: str
    :param end_time_utc: The end time of a lookup query in ISO UTC
     yyyy-MM-ddTHH:mm:ssZ.
    :type end_time_utc: str
    :param timeout: The server timeout for performing the operation in
     seconds. Defaults to 60.
    :type timeout: long
    :param events_types_filter: Comma separated list of FabricEvent types to
     include in the response.
    :type events_types_filter: str
    :param exclude_analysis_events: If true, AnalysisEvents are not
     retrieved.
    :type exclude_analysis_events: bool
    :param skip_correlation_lookup: If true, CorrelatedEvents are not
     searched; otherwise HasCorrelatedEvents is populated on every event.
    :type skip_correlation_lookup: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration overrides`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.servicefabric.models.ApplicationEvent] or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`FabricErrorException`
    """
    api_version = "6.4"

    # The application id may be hierarchical ("myapp~app1"), so it is not
    # URL-quoted (skip_quote).
    url = self._client.format_url(
        self.get_application_event_list.metadata['url'],
        applicationId=self._serialize.url("application_id", application_id, 'str', skip_quote=True))

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
    query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
    query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
    if events_types_filter is not None:
        query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
    if exclude_analysis_events is not None:
        query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
    if skip_correlation_lookup is not None:
        query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

    header_parameters = {'Accept': 'application/json'}
    if custom_headers:
        header_parameters.update(custom_headers)

    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.FabricErrorException(self._deserialize, response)

    deserialized = self._deserialize('[ApplicationEvent]', response) if response.status_code == 200 else None

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'}
- :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ApplicationEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_applications_event_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized 
= None - if response.status_code == 200: - deserialized = self._deserialize('[ApplicationEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'} - - def get_service_event_list( - self, service_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): - """Gets a Service-related events. - - The response is list of ServiceEvent objects. - - :param service_id: The identity of the service. This ID is typically - the full name of the service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the - "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the - service identity would be "myapp~app1~svc1" in 6.0+ and - "myapp/app1/svc1" in previous versions. - :type service_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string - specifying the types of FabricEvents that should only be included in - the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of - AnalysisEvents if true is passed. 
- :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of - CorrelatedEvents information if true is passed. otherwise the - CorrelationEvents get processed and HasCorrelatedEvents field in every - FabricEvent gets populated. - :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ServiceEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_service_event_list.metadata['url'] - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct 
headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[ServiceEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'} - - def get_services_event_list( - self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): - """Gets all Services-related events. - - The response is list of ServiceEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string - specifying the types of FabricEvents that should only be included in - the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of - AnalysisEvents if true is passed. 
- :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of - CorrelatedEvents information if true is passed. otherwise the - CorrelationEvents get processed and HasCorrelatedEvents field in every - FabricEvent gets populated. - :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ServiceEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_services_event_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - 
request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[ServiceEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} - - def get_partition_event_list( - self, partition_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): - """Gets a Partition-related events. - - The response is list of PartitionEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string - specifying the types of FabricEvents that should only be included in - the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of - AnalysisEvents if true is passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of - CorrelatedEvents information if true is passed. 
otherwise the - CorrelationEvents get processed and HasCorrelatedEvents field in every - FabricEvent gets populated. - :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.PartitionEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_event_list.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - 
# Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[PartitionEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} - - def get_partitions_event_list( - self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): - """Gets all Partitions-related events. - - The response is list of PartitionEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string - specifying the types of FabricEvents that should only be included in - the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of - AnalysisEvents if true is passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of - CorrelatedEvents information if true is passed. 
otherwise the - CorrelationEvents get processed and HasCorrelatedEvents field in every - FabricEvent gets populated. - :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.PartitionEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partitions_event_list.metadata['url'] - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if 
response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[PartitionEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} - - def get_partition_replica_event_list( - self, partition_id, replica_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): - """Gets a Partition Replica-related events. - - The response is list of ReplicaEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string - specifying the types of FabricEvents that should only be included in - the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of - AnalysisEvents if true is passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of - CorrelatedEvents information if true is passed. 
otherwise the - CorrelationEvents get processed and HasCorrelatedEvents field in every - FabricEvent gets populated. - :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ReplicaEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_replica_event_list.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} - 
header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[ReplicaEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} - - def get_partition_replicas_event_list( - self, partition_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): - """Gets all Replicas-related events for a Partition. - - The response is list of ReplicaEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC - yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string - specifying the types of FabricEvents that should only be included in - the response. 
- :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of - AnalysisEvents if true is passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of - CorrelatedEvents information if true is passed. otherwise the - CorrelationEvents get processed and HasCorrelatedEvents field in every - FabricEvent gets populated. - :type skip_correlation_lookup: bool - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.ReplicaEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_partition_replicas_event_list.metadata['url'] - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not 
None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[ReplicaEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} - - def get_correlated_event_list( - self, event_instance_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets all correlated events for a given event. - - The response is list of FabricEvents. - - :param event_instance_id: The EventInstanceId. - :type event_instance_id: str - :param timeout: The server timeout for performing the operation in - seconds. This timeout specifies the time duration that the client is - willing to wait for the requested operation to complete. The default - value for this parameter is 60 seconds. - :type timeout: long - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: list or ClientRawResponse if raw=true - :rtype: list[~azure.servicefabric.models.FabricEvent] or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`FabricErrorException` - """ - api_version = "6.4" - - # Construct URL - url = self.get_correlated_event_list.metadata['url'] - path_format_arguments = { - 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.FabricErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[FabricEvent]', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_correlated_event_list.metadata = {'url': '/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py new file mode 100644 index 000000000000..fec8f7cc0449 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py @@ -0,0 
    def get_cluster_manifest(
        self,
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ClusterManifest"
        """Get the Service Fabric cluster manifest.

        Get the Service Fabric cluster manifest. The cluster manifest contains properties of the
        cluster that include different node types on the cluster,
        security configurations, fault, and upgrade domain topologies, etc.

        These properties are specified as part of the ClusterConfig.JSON file while deploying a
        stand-alone cluster. However, most of the information in the cluster manifest
        is generated internally by service fabric during cluster deployment in other deployment
        scenarios (e.g. when using Azure portal).

        The contents of the cluster manifest are for informational purposes only and users are not
        expected to take a dependency on the format of the file contents or its interpretation.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterManifest, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterManifest
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional response hook supplied by azure-core callers via kwargs.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterManifest"]
        # Map well-known HTTP statuses to azure-core exception types; callers
        # may extend/override this mapping through the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (no path parameters for this cluster-level endpoint)
        url = self.get_cluster_manifest.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Server-side timeout; the REST API bounds it to [1, 4294967295].
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Run the request through the azure-core pipeline (policies, retry,
        # transport) and unwrap the transport response.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Raise the mapped exception for known statuses; otherwise fall
            # through to a generic HttpResponseError carrying the FabricError
            # body (failsafe_deserialize never raises on a malformed body).
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterManifest', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'}  # type: ignore
+ * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type nodes_health_state_filter: int + :param applications_health_state_filter: Allows filtering of the application health state + objects returned in the result of cluster health + query based on their health state. + The possible values for this parameter include integer value obtained from members or bitwise + operations + on members of HealthStateFilter enumeration. Only applications that match the filter are + returned. + All applications are used to evaluate the aggregated health state. If not specified, all + entries are returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of applications with HealthState + value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type applications_health_state_filter: int + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. 
+ Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param include_system_application_health_statistics: Indicates whether the health statistics + should include the fabric:/System application health statistics. False by default. + If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the + entities that belong to the fabric:/System application. + Otherwise, the query result includes health statistics only for user applications. + The health statistics must be included in the query result for this parameter to be applied. + :type include_system_application_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_health.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if nodes_health_state_filter is not None: + query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') + if applications_health_state_filter is not None: + query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if include_system_application_health_statistics is not None: + query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') + if timeout is not None: 
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_health.metadata = {'url': '/$/GetClusterHealth'} # type: ignore + + def get_cluster_health_using_policy( + self, + nodes_health_state_filter=0, # type: Optional[int] + applications_health_state_filter=0, # type: Optional[int] + events_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + include_system_application_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + application_health_policy_map=None, # type: Optional[List["_models.ApplicationHealthPolicyMapItem"]] + cluster_health_policy=None, # type: Optional["_models.ClusterHealthPolicy"] + **kwargs # type: Any + ): + # type: (...) -> "_models.ClusterHealth" + """Gets the health of a Service Fabric cluster using the specified policy. + + Use EventsHealthStateFilter to filter the collection of health events reported on the cluster + based on the health state. + Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the + collection of nodes and applications returned based on their aggregated health state. 
+ Use ClusterHealthPolicies to override the health policies used to evaluate the health. + + :param nodes_health_state_filter: Allows filtering of the node health state objects returned in + the result of cluster health query + based on their health state. The possible values for this parameter include integer value of + one of the + following health states. Only nodes that match the filter are returned. All nodes are used to + evaluate the aggregated health state. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of nodes with HealthState value of + OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type nodes_health_state_filter: int + :param applications_health_state_filter: Allows filtering of the application health state + objects returned in the result of cluster health + query based on their health state. + The possible values for this parameter include integer value obtained from members or bitwise + operations + on members of HealthStateFilter enumeration. Only applications that match the filter are + returned. + All applications are used to evaluate the aggregated health state. If not specified, all + entries are returned. 
+ The state values are flag-based enumeration, so the value could be a combination of these + values obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of applications with HealthState + value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type applications_health_state_filter: int + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. 
+ * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param include_system_application_health_statistics: Indicates whether the health statistics + should include the fabric:/System application health statistics. False by default. + If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the + entities that belong to the fabric:/System application. + Otherwise, the query result includes health statistics only for user applications. + The health statistics must be included in the query result for this parameter to be applied. + :type include_system_application_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy_map: Defines a map that contains specific application health + policies for different applications. + Each entry specifies as key the application name and as value an ApplicationHealthPolicy used + to evaluate the application health. + If an application is not specified in the map, the application health evaluation uses the + ApplicationHealthPolicy found in its application manifest or the default application health + policy (if no health policy is defined in the manifest). + The map is empty by default. 
+ :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] + :param cluster_health_policy: Defines a health policy used to evaluate the health of the + cluster or of a cluster node. + :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cluster_health_policies = _models.ClusterHealthPolicies(application_health_policy_map=application_health_policy_map, cluster_health_policy=cluster_health_policy) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_cluster_health_using_policy.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if nodes_health_state_filter is not None: + query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') + if applications_health_state_filter is not None: + query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = 
self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if include_system_application_health_statistics is not None: + query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _cluster_health_policies is not None: + body_content = self._serialize.body(_cluster_health_policies, 'ClusterHealthPolicies') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'} # type: ignore + + def get_cluster_health_chunk( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ClusterHealthChunk" + """Gets the health of a Service Fabric cluster using health chunks. 
+ + Gets the health of a Service Fabric cluster using health chunks. Includes the aggregated health + state of the cluster, but none of the cluster entities. + To expand the cluster health and get the health state of all or some of the entities, use the + POST URI and specify the cluster health chunk query description. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterHealthChunk, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterHealthChunk + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_health_chunk.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore + + def get_cluster_health_chunk_using_policy_and_advanced_filters( + self, + timeout=60, # type: Optional[int] + cluster_health_chunk_query_description=None, # type: Optional["_models.ClusterHealthChunkQueryDescription"] + **kwargs # type: Any + ): + # type: (...) -> "_models.ClusterHealthChunk" + """Gets the health of a Service Fabric cluster using health chunks. + + Gets the health of a Service Fabric cluster using health chunks. The health evaluation is done + based on the input cluster health chunk query description. + The query description allows users to specify health policies for evaluating the cluster and + its children. + Users can specify very flexible filters to select which cluster entities to return. The + selection can be done based on the entities health state and based on the hierarchy. + The query can return multi-level children of the entities based on the specified filters. For + example, it can return one application with a specified name, and for this application, return + only services that are in Error or Warning, and all partitions and replicas for one of these + services. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param cluster_health_chunk_query_description: Describes the cluster and application health + policies used to evaluate the cluster health and the filters to select which cluster entities + to be returned. + If the cluster health policy is present, it is used to evaluate the cluster events and the + cluster nodes. If not present, the health evaluation uses the cluster health policy defined in + the cluster manifest or the default cluster health policy. + By default, each application is evaluated using its specific application health policy, + defined in the application manifest, or the default health policy, if no policy is defined in + manifest. + If the application health policy map is specified, and it has an entry for an application, the + specified application health policy + is used to evaluate the application health. + Users can specify very flexible filters to select which cluster entities to include in + response. The selection can be done based on the entities health state and based on the + hierarchy. + The query can return multi-level children of the entities based on the specified filters. For + example, it can return one application with a specified name, and for this application, return + only services that are in Error or Warning, and all partitions and replicas for one of these + services. 
+ :type cluster_health_chunk_query_description: ~azure.servicefabric.models.ClusterHealthChunkQueryDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterHealthChunk, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterHealthChunk + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cluster_health_chunk_query_description is not None: + body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore + + def report_cluster_health( + self, + health_information, # type: "_models.HealthInformation" + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Sends a health report on the Service Fabric cluster. + + Sends a health report on a Service Fabric cluster. The report must contain the information + about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway node, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetClusterHealth and check that + the report appears in the HealthEvents section. + + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. 
+ If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_cluster_health.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + 
return cls(pipeline_response, None, {}) + + report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} # type: ignore + + def get_provisioned_fabric_code_version_info_list( + self, + code_version=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.FabricCodeVersionInfo"] + """Gets a list of fabric code versions that are provisioned in a Service Fabric cluster. + + Gets a list of information about fabric code versions that are provisioned in the cluster. The + parameter CodeVersion can be used to optionally filter the output to only that particular + version. + + :param code_version: The product version of Service Fabric. + :type code_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of FabricCodeVersionInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricCodeVersionInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if code_version is not None: + query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str') + if 
timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[FabricCodeVersionInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} # type: ignore + + def get_provisioned_fabric_config_version_info_list( + self, + config_version=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.FabricConfigVersionInfo"] + """Gets a list of fabric config versions that are provisioned in a Service Fabric cluster. + + Gets a list of information about fabric config versions that are provisioned in the cluster. + The parameter ConfigVersion can be used to optionally filter the output to only that particular + version. + + :param config_version: The config version of Service Fabric. + :type config_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of FabricConfigVersionInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricConfigVersionInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if config_version is not None: + query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[FabricConfigVersionInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} # type: ignore + + def get_cluster_upgrade_progress( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ClusterUpgradeProgressObject" + """Gets the progress of the current cluster upgrade. + + Gets the current progress of the ongoing cluster upgrade. If no upgrade is currently in + progress, get the last state of the previous cluster upgrade. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterUpgradeProgressObject, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterUpgradeProgressObject"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_upgrade_progress.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterUpgradeProgressObject', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_upgrade_progress.metadata = {'url': '/$/GetUpgradeProgress'} # type: ignore + + def get_cluster_configuration( + self, + configuration_api_version, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ClusterConfiguration" + """Get the Service Fabric standalone cluster configuration. + + The cluster configuration contains properties of the cluster that include different node types + on the cluster, + security configurations, fault, and upgrade domain topologies, etc. + + :param configuration_api_version: The API version of the Standalone cluster json configuration. + :type configuration_api_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterConfiguration, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterConfiguration + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfiguration"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_configuration.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterConfiguration', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'} # type: ignore + + def 
get_cluster_configuration_upgrade_status( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ClusterConfigurationUpgradeStatusInfo" + """Get the cluster configuration upgrade status of a Service Fabric standalone cluster. + + Get the cluster configuration upgrade status details of a Service Fabric standalone cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ClusterConfigurationUpgradeStatusInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfigurationUpgradeStatusInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_cluster_configuration_upgrade_status.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'} # type: ignore + + def get_upgrade_orchestration_service_state( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.UpgradeOrchestrationServiceState" + """Get the service state of Service Fabric Upgrade Orchestration Service. + + Get the service state of Service Fabric Upgrade Orchestration Service. This API is internally + used for support purposes. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UpgradeOrchestrationServiceState, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UpgradeOrchestrationServiceState"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_upgrade_orchestration_service_state.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UpgradeOrchestrationServiceState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'} # type: ignore + + def set_upgrade_orchestration_service_state( + self, + 
timeout=60, # type: Optional[int] + service_state=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.UpgradeOrchestrationServiceStateSummary" + """Update the service state of Service Fabric Upgrade Orchestration Service. + + Update the service state of Service Fabric Upgrade Orchestration Service. This API is + internally used for support purposes. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param service_state: The state of Service Fabric Upgrade Orchestration Service. + :type service_state: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UpgradeOrchestrationServiceStateSummary, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UpgradeOrchestrationServiceStateSummary"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _upgrade_orchestration_service_state = _models.UpgradeOrchestrationServiceState(service_state=service_state) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.set_upgrade_orchestration_service_state.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'} # type: ignore + + def provision_cluster( + self, + timeout=60, # type: Optional[int] + code_file_path=None, # type: Optional[str] + cluster_manifest_file_path=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Provision the code or configuration packages of a Service Fabric cluster. + + Validate and provision the code or configuration packages of a Service Fabric cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param code_file_path: The cluster code package file path. 
+ :type code_file_path: str + :param cluster_manifest_file_path: The cluster manifest file path. + :type cluster_manifest_file_path: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _provision_fabric_description = _models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.provision_cluster.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_provision_fabric_description, 'ProvisionFabricDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + provision_cluster.metadata = {'url': '/$/Provision'} # type: ignore + + def unprovision_cluster( + self, + timeout=60, # type: Optional[int] + code_version=None, # type: Optional[str] + config_version=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Unprovision the code or configuration packages of a Service Fabric cluster. + + It is supported to unprovision code and configuration separately. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param code_version: The cluster code package version. + :type code_version: str + :param config_version: The cluster manifest version. 
+ :type config_version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _unprovision_fabric_description = _models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.unprovision_cluster.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_unprovision_fabric_description, 'UnprovisionFabricDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + unprovision_cluster.metadata = {'url': '/$/Unprovision'} # type: ignore + + def rollback_cluster_upgrade( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Roll back the upgrade of a Service Fabric cluster. + + Roll back the code or configuration upgrade of a Service Fabric cluster. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.rollback_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'} # type: ignore + + def resume_cluster_upgrade( + self, + upgrade_domain, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Make the cluster upgrade move on to the next upgrade domain. + + Make the cluster code or configuration upgrade move on to the next upgrade domain if + appropriate. + + :param upgrade_domain: The next upgrade domain for this cluster upgrade. + :type upgrade_domain: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _resume_cluster_upgrade_description = _models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.resume_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, 
model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'} # type: ignore + + def start_cluster_upgrade( + self, + start_cluster_upgrade_description, # type: "_models.StartClusterUpgradeDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Start upgrading the code or configuration version of a Service Fabric cluster. + + Validate the supplied upgrade parameters and start upgrading the code or configuration version + of a Service Fabric cluster if the parameters are valid. + + :param start_cluster_upgrade_description: Describes the parameters for starting a cluster + upgrade. + :type start_cluster_upgrade_description: ~azure.servicefabric.models.StartClusterUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_cluster_upgrade.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_cluster_upgrade.metadata = {'url': 
'/$/Upgrade'} # type: ignore

    # NOTE(review): this file is auto-generated by AutoRest from the Service Fabric
    # swagger specification (api-version 8.0). Do not hand-edit generated operations;
    # regenerate from the spec instead.

    def start_cluster_configuration_upgrade(
        self,
        cluster_configuration_upgrade_description,  # type: "_models.ClusterConfigurationUpgradeDescription"
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Start upgrading the configuration of a Service Fabric standalone cluster.

        Validate the supplied configuration upgrade parameters and start upgrading the cluster
        configuration if the parameters are valid.

        :param cluster_configuration_upgrade_description: Parameters for a standalone cluster
         configuration upgrade.
        :type cluster_configuration_upgrade_description: ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.start_cluster_configuration_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The service acknowledges the upgrade request asynchronously with 202 Accepted.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'}  # type: ignore

    def update_cluster_upgrade(
        self,
        update_cluster_upgrade_description,  # type: "_models.UpdateClusterUpgradeDescription"
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Update the upgrade parameters of a Service Fabric cluster upgrade.

        Update the upgrade parameters used during a Service Fabric cluster upgrade.

        :param update_cluster_upgrade_description: Parameters for updating a cluster upgrade.
        :type update_cluster_upgrade_description: ~azure.servicefabric.models.UpdateClusterUpgradeDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_cluster_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    update_cluster_upgrade.metadata = {'url': '/$/UpdateUpgrade'}  # type: ignore

    def get_aad_metadata(
        self,
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.AadMetadataObject"
        """Gets the Azure Active Directory metadata used for secured connection to cluster.

        Gets the Azure Active Directory metadata used for secured connection to cluster.
        This API is not supposed to be called separately. It provides information needed to set up an
        Azure Active Directory secured connection with a Service Fabric cluster.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AadMetadataObject, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.AadMetadataObject
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AadMetadataObject"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_aad_metadata.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('AadMetadataObject', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'}  # type: ignore

    def get_cluster_version(
        self,
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ClusterVersion"
        """Get the current Service Fabric cluster version.

        If a cluster upgrade is happening, then this API will return the lowest (older) version of the
        current and target cluster runtime versions.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterVersion, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterVersion
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterVersion"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_version.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterVersion', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_version.metadata = {'url': '/$/GetClusterVersion'}  # type: ignore

    def get_cluster_load(
        self,
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ClusterLoadInfo"
        """Gets the load of a Service Fabric cluster.

        Retrieves the load information of a Service Fabric cluster for all the metrics that have load
        or capacity defined.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterLoadInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterLoadInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterLoadInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_load.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterLoadInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_load.metadata = {'url': '/$/GetLoadInformation'}  # type: ignore

    def toggle_verbose_service_placement_health_reporting(
        self,
        enabled,  # type: bool
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Changes the verbosity of service placement health reporting.

        If verbosity is set to true, then detailed health reports will be generated when replicas
        cannot be placed or dropped.
        If verbosity is set to false, then no health reports will be generated when replicas cannot be
        placed or dropped.

        :param enabled: The verbosity of service placement health reporting.
        :type enabled: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.toggle_verbose_service_placement_health_reporting.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # 'Enabled' is a required query parameter for this operation.
        query_parameters['Enabled'] = self._serialize.query("enabled", enabled, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    toggle_verbose_service_placement_health_reporting.metadata = {'url': '/$/ToggleVerboseServicePlacementHealthReporting'}  # type: ignore

    def get_node_info_list(
        self,
        continuation_token_parameter=None,  # type: Optional[str]
        node_status_filter="default",  # type: Optional[Union[str, "_models.NodeStatusFilter"]]
        max_results=0,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedNodeInfoList"
        """Gets the list of nodes in the Service Fabric cluster.

        The response includes the name, status, ID, health, uptime, and other details about the nodes.

        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param node_status_filter: Allows filtering the nodes based on the NodeStatus. Only the nodes
         that are matching the specified filter value will be returned. The filter value can be one of
         the following.
        :type node_status_filter: str or ~azure.servicefabric.models.NodeStatusFilter
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedNodeInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedNodeInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedNodeInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_node_info_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if continuation_token_parameter is not None:
            # skip_quote=True: the service expects the raw (non-URL-encoded) continuation token.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if node_status_filter is not None:
            query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str')
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedNodeInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_node_info_list.metadata = {'url': '/Nodes'}  # type: ignore

    def get_node_info(
        self,
        node_name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.NodeInfo"]
        """Gets the information about a specific node in the Service Fabric cluster.

        The response includes the name, status, ID, health, uptime, and other details about the node.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NodeInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.NodeInfo or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.NodeInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_node_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 (no content) is a valid response and yields None.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NodeInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_node_info.metadata = {'url': '/Nodes/{nodeName}'}  # type: ignore

    def get_node_health(
        self,
        node_name,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.NodeHealth"
        """Gets the health of a Service Fabric node.

        Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection
        of health events reported on the node based on the health state. If the node that you specify
        by name does not exist in the health store, this returns an error.

        :param node_name: The name of the node.
        :type node_name: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state.
         The possible values for this parameter include integer value of one of the following health
         states.
         Only events that match the filter are returned. All events are used to evaluate the aggregated
         health state.
         If not specified, all entries are returned. The state values are flag-based enumeration, so
         the value could be a combination of these values, obtained using the bitwise 'OR' operator. For
         example, If the provided value is 6 then all of the events with HealthState value of OK (2) and
         Warning (4) are returned.


         * Default - Default value. Matches any HealthState. The value is zero.
         * None - Filter that doesn't match any HealthState value. Used in order to return no results
         on a given collection of states. The value is 1.
         * Ok - Filter that matches input with HealthState value Ok. The value is 2.
         * Warning - Filter that matches input with HealthState value Warning. The value is 4.
         * Error - Filter that matches input with HealthState value Error. The value is 8.
         * All - Filter that matches input with any HealthState value. The value is 65535.
        :type events_health_state_filter: int
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NodeHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.NodeHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NodeHealth"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_node_health.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('NodeHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'}  # type: ignore

    def get_node_health_using_policy(
        self,
        node_name,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        cluster_health_policy=None,  # type: Optional["_models.ClusterHealthPolicy"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.NodeHealth"
        """Gets the health of a Service Fabric node, by using the specified health policy.

        Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection
        of health events reported on the node based on the health state. Use ClusterHealthPolicy in the
        POST body to override the health policies used to evaluate the health. If the node that you
        specify by name does not exist in the health store, this returns an error.

        :param node_name: The name of the node.
        :type node_name: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state.
         The possible values for this parameter include integer value of one of the following health
         states.
         Only events that match the filter are returned. All events are used to evaluate the aggregated
         health state.
         If not specified, all entries are returned. The state values are flag-based enumeration, so
         the value could be a combination of these values, obtained using the bitwise 'OR' operator. For
         example, If the provided value is 6 then all of the events with HealthState value of OK (2) and
         Warning (4) are returned.


         * Default - Default value. Matches any HealthState. The value is zero.
         * None - Filter that doesn't match any HealthState value. Used in order to return no results
         on a given collection of states. The value is 1.
         * Ok - Filter that matches input with HealthState value Ok. The value is 2.
         * Warning - Filter that matches input with HealthState value Warning. The value is 4.
         * Error - Filter that matches input with HealthState value Error. The value is 8.
         * All - Filter that matches input with any HealthState value. The value is 65535.
        :type events_health_state_filter: int
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param cluster_health_policy: Describes the health policies used to evaluate the health of a
         cluster or node. If not present, the health evaluation uses the health policy from cluster
         manifest or the default health policy.
        :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NodeHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.NodeHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NodeHealth"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.get_node_health_using_policy.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The request body (health policy override) is optional for this POST.
        body_content_kwargs = {}  # type: Dict[str, Any]
        if cluster_health_policy is not None:
            body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('NodeHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'}  # type: ignore

    def report_node_health(
        self,
        node_name,  # type: str
        health_information,  # type: "_models.HealthInformation"
        immediate=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Sends a health report on the Service Fabric node.

        Reports health state of the specified Service Fabric node. The report must contain the
        information about the source of the health report and property on which it is reported.
        The report is sent to a Service Fabric gateway node, which forwards to the health store.
        The report may be accepted by the gateway, but rejected by the health store after extra
        validation.
        For example, the health store may reject the report because of an invalid parameter, like a
        stale sequence number.
        To see whether the report was applied in the health store, run GetNodeHealth and check that the
        report appears in the HealthEvents section.

        :param node_name: The name of the node.
        :type node_name: str
        :param health_information: Describes the health information for the health report. This
         information needs to be present in all of the health reports sent to the health manager.
        :type health_information: ~azure.servicefabric.models.HealthInformation
        :param immediate: A flag that indicates whether the report should be sent immediately.
         A health report is sent to a Service Fabric gateway Application, which forwards to the health
         store.
         If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health
         store, regardless of the fabric client settings that the HTTP Gateway Application is using.
         This is useful for critical reports that should be sent as soon as possible.
         Depending on timing and other conditions, sending the report may still fail, for example if
         the HTTP Gateway is closed or the message doesn't reach the Gateway.
         If Immediate is set to false, the report is sent based on the health client settings from the
         HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval
         configuration.
         This is the recommended setting because it allows the health client to optimize health
         reporting messages to health store as well as health report processing.
         By default, reports are not sent immediately.
        :type immediate: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.report_node_health.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if immediate is not None:
            query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(health_information, 'HealthInformation')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'}  # type: ignore

    def get_node_load_info(
        self,
        node_name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.NodeLoadInfo"
        """Gets the load information of a Service Fabric node.

        Retrieves the load information of a Service Fabric node for all the metrics that have load or
        capacity defined.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeLoadInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeLoadInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeLoadInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_load_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('NodeLoadInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} # type: ignore + + def 
    def disable_node(
        self,
        node_name,  # type: str
        timeout=60,  # type: Optional[int]
        deactivation_intent=None,  # type: Optional[Union[str, "_models.DeactivationIntent"]]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deactivate a Service Fabric cluster node with the specified deactivation intent.

        Deactivate a Service Fabric cluster node with the specified deactivation intent. Once the
        deactivation is in progress, the deactivation intent can be increased, but not decreased
        (for example, a node that is deactivated with the Pause intent can be deactivated further
        with Restart, but not the other way around). Nodes may be reactivated using the Activate a
        node operation any time after they are deactivated. If the deactivation is not complete,
        this will cancel the deactivation. A node that goes down and comes back up while deactivated
        will still need to be reactivated before services will be placed on that node.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param deactivation_intent: Describes the intent or reason for deactivating the node.
        :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # The flat 'deactivation_intent' argument is wrapped into the wire model the API expects.
        _deactivation_intent_description = _models.DeactivationIntentDescription(deactivation_intent=deactivation_intent)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.disable_node.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Client-side validation: timeout must be in [1, 4294967295] (uint32 max).
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_deactivation_intent_description, 'DeactivationIntentDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'}  # type: ignore
+ map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'} # type: ignore + + def enable_node( + self, + node_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Activate a Service Fabric cluster node that is currently deactivated. + + Activates a Service Fabric cluster node that is currently deactivated. Once activated, the node + will again become a viable target for placing new replicas, and any deactivated replicas + remaining on the node will be reactivated. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
    def remove_node_state(
        self,
        node_name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Notifies Service Fabric that the persisted state on a node has been permanently removed or lost.

        This implies that it is not possible to recover the persisted state of that node. This
        generally happens if a hard disk has been wiped clean, or if a hard disk crashes. The node
        has to be down for this operation to be successful. This operation lets Service Fabric know
        that the replicas on that node no longer exist, and that Service Fabric should stop waiting
        for those replicas to come back up. Do not run this cmdlet if the state on the node has not
        been removed and the node can come back up with its state intact. Starting from Service
        Fabric 6.5, in order to use this API for seed nodes, please change the seed nodes to regular
        (non-seed) nodes and then invoke this API to remove the node state. If the cluster is
        running on Azure, after the seed node goes down, Service Fabric will try to change it to a
        non-seed node automatically. To make this happen, make sure the number of non-seed nodes in
        the primary node type is no less than the number of Down seed nodes. If necessary, add more
        nodes to the primary node type to achieve this. For standalone cluster, if the Down seed
        node is not expected to come back up with its state intact, please remove the node from the
        cluster, see
        https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-windows-server-add-remove-nodes.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.remove_node_state.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Client-side validation: timeout must be in [1, 4294967295] (uint32 max).
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # POST with no request body; only the URL identifies the target node.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'}  # type: ignore
    def restart_node(
        self,
        node_name,  # type: str
        timeout=60,  # type: Optional[int]
        node_instance_id="0",  # type: str
        create_fabric_dump="False",  # type: Optional[Union[str, "_models.CreateFabricDump"]]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Restarts a Service Fabric cluster node.

        Restarts a Service Fabric cluster node that is already started.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param node_instance_id: The instance ID of the target node. If instance ID is specified the
         node is restarted only if it matches with the current instance of the node. A default value
         of "0" would match any instance ID. The instance ID can be obtained using get node query.
        :type node_instance_id: str
        :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is
         case-sensitive.
        :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # NOTE: create_fabric_dump defaults to the *string* "False" — the API models this flag
        # as a case-sensitive string enum, not a boolean (see docstring above).
        _restart_node_description = _models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.restart_node.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Client-side validation: timeout must be in [1, 4294967295] (uint32 max).
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_restart_node_description, 'RestartNodeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'}  # type: ignore
# type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_restart_node_description, 'RestartNodeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'} # type: ignore + + def remove_configuration_overrides( + self, + node_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Removes configuration overrides on the specified node. + + This api allows removing all existing configuration overrides on specified node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
    def get_configuration_overrides(
        self,
        node_name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.ConfigParameterOverride"]
        """Gets the list of configuration overrides on the specified node.

        This api allows getting all existing configuration overrides on the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ConfigParameterOverride, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.ConfigParameterOverride]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ConfigParameterOverride"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_configuration_overrides.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Client-side validation: timeout must be in [1, 4294967295] (uint32 max).
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # '[ConfigParameterOverride]' is msrest syntax for "list of ConfigParameterOverride".
        deserialized = self._deserialize('[ConfigParameterOverride]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/GetConfigurationOverrides'}  # type: ignore
    def add_configuration_parameter_overrides(
        self,
        node_name,  # type: str
        config_parameter_override_list,  # type: List["_models.ConfigParameterOverride"]
        force=None,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Adds the list of configuration overrides on the specified node.

        This api allows adding all existing configuration overrides on the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param config_parameter_override_list: Description for adding list of configuration
         overrides.
        :type config_parameter_override_list:
         list[~azure.servicefabric.models.ConfigParameterOverride]
        :param force: Force adding configuration overrides on specified nodes.
        :type force: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add_configuration_parameter_overrides.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if force is not None:
            query_parameters['Force'] = self._serialize.query("force", force, 'bool')
        if timeout is not None:
            # Client-side validation: timeout must be in [1, 4294967295] (uint32 max).
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # '[ConfigParameterOverride]' is msrest syntax for "list of ConfigParameterOverride".
        body_content = self._serialize.body(config_parameter_override_list, '[ConfigParameterOverride]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'}  # type: ignore
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'} # type: ignore + + def remove_node_tags( + self, + node_name, # type: str + node_tags, # type: List[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Removes the list of tags from the specified node. + + This api allows removing set of tags from the specified node. + + :param node_name: The name of the node. + :type node_name: str + :param node_tags: Description for adding list of node tags. + :type node_tags: list[str] + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.remove_node_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
    def add_node_tags(
        self,
        node_name,  # type: str
        node_tags,  # type: List[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Adds the list of tags on the specified node.

        This api allows adding tags to the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param node_tags: Description for adding list of node tags.
        :type node_tags: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add_node_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # Unlike most node operations in this client, this one exposes no timeout parameter.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # The tag list is serialized as a plain JSON array of strings.
        body_content = self._serialize.body(node_tags, '[str]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    add_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/AddNodeTags'}  # type: ignore
type: ignore + + def get_application_type_info_list( + self, + application_type_definition_kind_filter=0, # type: Optional[int] + exclude_application_parameters=False, # type: Optional[bool] + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedApplicationTypeInfoList" + """Gets the list of application types in the Service Fabric cluster. + + Returns the information about the application types that are provisioned or in the process of + being provisioned in the Service Fabric cluster. Each version of an application type is + returned as one application type. The response includes the name, version, status, and other + details about the application type. This is a paged query, meaning that if not all of the + application types fit in a page, one page of results is returned as well as a continuation + token, which can be used to get the next page. For example, if there are 10 application types + but a page only fits the first three application types, or if max results is set to 3, then + three is returned. To access the rest of the results, retrieve subsequent pages by using the + returned continuation token in the next query. An empty continuation token is returned if there + are no subsequent pages. + + :param application_type_definition_kind_filter: Used to filter on ApplicationTypeDefinitionKind + which is the mechanism used to define a Service Fabric application type. + + + * Default - Default value, which performs the same function as selecting "All". The value is + 0. + * All - Filter that matches input with any ApplicationTypeDefinitionKind value. The value is + 65535. + * ServiceFabricApplicationPackage - Filter that matches input with + ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage. The value is 1. + * Compose - Filter that matches input with ApplicationTypeDefinitionKind value Compose. 
The + value is 2. + :type application_type_definition_kind_filter: int + :param exclude_application_parameters: The flag that specifies whether application parameters + will be excluded from the result. + :type exclude_application_parameters: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedApplicationTypeInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationTypeInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_type_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if application_type_definition_kind_filter is not None: + query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_type_info_list.metadata = {'url': '/ApplicationTypes'} # type: ignore + + def get_application_type_info_list_by_name( + self, + application_type_name, # type: str + application_type_version=None, # type: Optional[str] + exclude_application_parameters=False, # type: Optional[bool] + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedApplicationTypeInfoList" + """Gets the list of application types in the Service Fabric cluster matching exactly the specified name. + + Returns the information about the application types that are provisioned or in the process of + being provisioned in the Service Fabric cluster. These results are of application types whose + name match exactly the one specified as the parameter, and which comply with the given query + parameters. All versions of the application type matching the application type name are + returned, with each version returned as one application type. The response includes the name, + version, status, and other details about the application type. This is a paged query, meaning + that if not all of the application types fit in a page, one page of results is returned as well + as a continuation token, which can be used to get the next page. 
For example, if there are 10 + application types but a page only fits the first three application types, or if max results is + set to 3, then three is returned. To access the rest of the results, retrieve subsequent pages + by using the returned continuation token in the next query. An empty continuation token is + returned if there are no subsequent pages. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param exclude_application_parameters: The flag that specifies whether application parameters + will be excluded from the result. + :type exclude_application_parameters: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedApplicationTypeInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationTypeInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_type_info_list_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if application_type_version is not None: + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'} # type: ignore + + def provision_application_type( + self, + provision_application_type_description_base_required_body_param, # type: "_models.ProvisionApplicationTypeDescriptionBase" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Provisions or registers a Service Fabric application type with the cluster using the '.sfpkg' package in the external store or using the application package in the image store. + + Provisions a Service Fabric application type with the cluster. The provision is required before + any new applications can be instantiated. + The provision operation can be performed either on the application package specified by the + relativePathInImageStore, or by using the URI of the external '.sfpkg'. + + :param provision_application_type_description_base_required_body_param: The base type of + provision application type description which supports either image store-based provision or + external store-based provision. 
+ :type provision_application_type_description_base_required_body_param: ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.provision_application_type.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'} # type: ignore + + def unprovision_application_type( + self, + application_type_name, # type: str + application_type_version, # type: str + timeout=60, # type: Optional[int] + async_parameter=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> None + """Removes or unregisters a Service Fabric application type from the cluster. + + This operation can only be performed if all application instances of the application type have + been deleted. Once the application type is unregistered, no new application instances can be + created for this particular application type. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type as defined in the + application manifest. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param async_parameter: The flag indicating whether or not unprovision should occur + asynchronously. When set to true, the unprovision operation returns when the request is + accepted by the system, and the unprovision operation continues without any timeout limit. The + default value is false. However, we recommend setting it to true for large application packages + that were provisioned. 
+ :type async_parameter: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _unprovision_application_type_description_info = _models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.unprovision_application_type.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, 
**kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'} # type: ignore + + def get_service_type_info_list( + self, + application_type_name, # type: str + application_type_version, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ServiceTypeInfo"] + """Gets the list containing the information about service types that are supported by a provisioned application type in a Service Fabric cluster. + + Gets the list containing the information about service types that are supported by a + provisioned application type in a Service Fabric cluster. The provided application type must + exist. Otherwise, a 404 status is returned. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ServiceTypeInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ServiceTypeInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceTypeInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_type_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ServiceTypeInfo]', 
pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'} # type: ignore + + def get_service_type_info_by_name( + self, + application_type_name, # type: str + application_type_version, # type: str + service_type_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.ServiceTypeInfo"] + """Gets the information about a specific service type that is supported by a provisioned application type in a Service Fabric cluster. + + Gets the information about a specific service type that is supported by a provisioned + application type in a Service Fabric cluster. The provided application type must exist. + Otherwise, a 404 status is returned. A 204 response is returned if the specified service type + is not found in the cluster. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param service_type_name: Specifies the name of a Service Fabric service type. + :type service_type_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceTypeInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceTypeInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceTypeInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_type_info_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceTypeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore + + def get_service_manifest( + self, + application_type_name, # type: str + application_type_version, # type: str + service_manifest_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceTypeManifest" + """Gets the manifest describing a service type. + + Gets the manifest describing a service type. The response contains the service manifest XML as + a string. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceTypeManifest, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceTypeManifest + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceTypeManifest"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_manifest.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ServiceTypeManifest', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'}  # type: ignore

    def get_deployed_service_type_info_list(
        self,
        node_name,  # type: str
        application_id,  # type: str
        service_manifest_name=None,  # type: Optional[str]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.DeployedServiceTypeInfo"]
        """Gets the list containing the information about service types from the applications deployed on a node in a Service Fabric cluster.

        Gets the list containing the information about service types from the applications deployed on
        a node in a Service Fabric cluster. The response includes the name of the service type, its
        registration status, the code package that registered it and activation ID of the service
        package.

        :param node_name: The name of the node.
        :type node_name: str
        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param service_manifest_name: The name of the service manifest to filter the list of deployed
         service type information. If specified, the response will only contain the information about
         service types that are defined in this service manifest.
        :type service_manifest_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of DeployedServiceTypeInfo, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.DeployedServiceTypeInfo"]]
        # Known fault statuses map to dedicated azure.core exception types; any other
        # non-success status is raised below as HttpResponseError carrying a FabricError model.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # REST API version is pinned in the generated client
        accept = "application/json"

        # Construct URL
        url = self.get_deployed_service_type_info_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if service_manifest_name is not None:
            query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 is the only success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'}  # type: ignore

    def get_deployed_service_type_info_by_name(
        self,
        node_name,  # type: str
        application_id,  # type: str
        service_type_name,  # type: str
        service_manifest_name=None,  # type: Optional[str]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional[List["_models.DeployedServiceTypeInfo"]]
        """Gets the information about a specified service type of the application deployed on a node in a Service Fabric cluster.

        Gets the list containing the information about a specific service type from the applications
        deployed on a node in a Service Fabric cluster. The response includes the name of the service
        type, its registration status, the code package that registered it and activation ID of the
        service package. Each entry represents one activation of a service type, differentiated by the
        activation ID.

        :param node_name: The name of the node.
        :type node_name: str
        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param service_type_name: Specifies the name of a Service Fabric service type.
        :type service_type_name: str
        :param service_manifest_name: The name of the service manifest to filter the list of deployed
         service type information. If specified, the response will only contain the information about
         service types that are defined in this service manifest.
        :type service_manifest_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of DeployedServiceTypeInfo, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional[List["_models.DeployedServiceTypeInfo"]]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_deployed_service_type_info_by_name.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
            'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if service_manifest_name is not None:
            query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # A 204 (no content) response deliberately yields None rather than an empty list.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'}  # type: ignore

    def create_application(
        self,
        application_description,  # type: "_models.ApplicationDescription"
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Creates a Service Fabric application.

        Creates a Service Fabric application using the specified description.

        :param application_description: Description for creating an application.
        :type application_description: ~azure.servicefabric.models.ApplicationDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_application.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the application description into the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(application_description, 'ApplicationDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Creation succeeds only with 201 Created; there is no response body to deserialize.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    create_application.metadata = {'url': '/Applications/$/Create'}  # type: ignore

    def delete_application(
        self,
        application_id,  # type: str
        force_remove=None,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes an existing Service Fabric application.

        An application must be created before it can be deleted. Deleting an application will delete
        all services that are part of that application. By default, Service Fabric will try to close
        service replicas in a graceful manner and then delete the service. However, if a service is
        having issues closing the replica gracefully, the delete operation may take a long time or get
        stuck. Use the optional ForceRemove flag to skip the graceful close sequence and forcefully
        delete the application and all of its services.

        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param force_remove: Remove a Service Fabric application or service forcefully without going
         through the graceful shutdown sequence. This parameter can be used to forcefully delete an
         application or service for which delete is timing out due to issues in the service code that
         prevents graceful close of replicas.
        :type force_remove: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.delete_application.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if force_remove is not None:
            query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'}  # type: ignore

    # NOTE(review): this span is cut mid-definition by the mangled patch; the next method's
    # signature (get_application_load_info) continues on the following source line.
    def
    # NOTE(review): the 'def' keyword of this method sits at the end of the previous
    # source line in the mangled patch; this span begins mid-signature.
    get_application_load_info(
        self,
        application_id,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ApplicationLoadInfo"]
        """Gets load information about a Service Fabric application.

        Returns the load information about the application that was created or in the process of being
        created in the Service Fabric cluster and whose name matches the one specified as the
        parameter. The response includes the name, minimum nodes, maximum nodes, the number of nodes
        the application is occupying currently, and application load metric information about the
        application.

        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationLoadInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ApplicationLoadInfo or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ApplicationLoadInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_load_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # A 204 (no content) response yields None instead of a model instance.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationLoadInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'}  # type: ignore

    def get_application_info_list(
        self,
        application_definition_kind_filter=0,  # type: Optional[int]
        application_type_name=None,  # type: Optional[str]
        exclude_application_parameters=False,  # type: Optional[bool]
        continuation_token_parameter=None,  # type: Optional[str]
        max_results=0,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedApplicationInfoList"
        """Gets the list of applications created in the Service Fabric cluster that match the specified filters.

        Gets the information about the applications that were created or in the process of being
        created in the Service Fabric cluster and match the specified filters. The response includes
        the name, type, status, parameters, and other details about the application. If the
        applications do not fit in a page, one page of results is returned as well as a continuation
        token, which can be used to get the next page. Filters ApplicationTypeName and
        ApplicationDefinitionKindFilter cannot be specified at the same time.

        :param application_definition_kind_filter: Used to filter on ApplicationDefinitionKind, which
         is the mechanism used to define a Service Fabric application. Values: Default (0, same as
         All), All (65535), ServiceFabricApplicationDescription (1), Compose (2).
        :type application_definition_kind_filter: int
        :param application_type_name: The application type name used to filter the applications to
         query for. This value should not contain the application type version.
        :type application_type_name: str
        :param exclude_application_parameters: The flag that specifies whether application parameters
         will be excluded from the result.
        :type exclude_application_parameters: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedApplicationInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedApplicationInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedApplicationInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_info_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if application_definition_kind_filter is not None:
            query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int')
        if application_type_name is not None:
            query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str')
        if exclude_application_parameters is not None:
            query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
        if continuation_token_parameter is not None:
            # skip_quote: the service hands back an already-encoded token; re-encoding would corrupt it.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedApplicationInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_info_list.metadata = {'url': '/Applications'}  # type: ignore

    def get_application_info(
        self,
        application_id,  # type: str
        exclude_application_parameters=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ApplicationInfo"]
        """Gets information about a Service Fabric application.

        Returns the information about the application that was created or in the process of being
        created in the Service Fabric cluster and whose name matches the one specified as the
        parameter. The response includes the name, type, status, parameters, and other details about
        the application.

        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param exclude_application_parameters: The flag that specifies whether application parameters
         will be excluded from the result.
        :type exclude_application_parameters: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ApplicationInfo or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ApplicationInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if exclude_application_parameters is not None:
            query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # A 204 (no content) response yields None instead of a model instance.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_info.metadata = {'url': '/Applications/{applicationId}'}  # type: ignore

    def get_application_health(
        self,
        application_id,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        deployed_applications_health_state_filter=0,  # type: Optional[int]
        services_health_state_filter=0,  # type: Optional[int]
        exclude_health_statistics=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ApplicationHealth"
        """Gets the health of the service fabric application.

        Returns the heath state of the service fabric application. The response reports either Ok,
        Error or Warning health state. If the entity is not found in the health store, it will return
        Error.

        The three ``*_health_state_filter`` parameters are flag-based enumerations and may be
        combined with the bitwise 'OR' operator: Default (0, matches any HealthState),
        None (1, matches nothing), Ok (2), Warning (4), Error (8), All (65535). For example, a
        value of 6 returns entries with HealthState Ok (2) or Warning (4). Entries that do not
        match a filter are omitted from the result but still contribute to the aggregated health
        state.

        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state (flag values described above).
        :type events_health_state_filter: int
        :param deployed_applications_health_state_filter: Allows filtering of the deployed
         applications health state objects returned in the result of application health query based
         on their health state (flag values described above).
        :type deployed_applications_health_state_filter: int
        :param services_health_state_filter: Allows filtering of the services health state objects
         returned in the result of services health query based on their health state (flag values
         described above).
        :type services_health_state_filter: int
        :param exclude_health_statistics: Indicates whether the health statistics should be returned
         as part of the query result. False by default. The statistics show the number of children
         entities in health state Ok, Warning, and Error.
        :type exclude_health_statistics: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ApplicationHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationHealth"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_health.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if deployed_applications_health_state_filter is not None:
            query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int')
        if services_health_state_filter is not None:
            query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int')
        if exclude_health_statistics is not None:
            query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ApplicationHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'}  # type: ignore

    def get_application_health_using_policy(
        self,
        application_id,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        deployed_applications_health_state_filter=0,  # type: Optional[int]
        services_health_state_filter=0,  # type: Optional[int]
        exclude_health_statistics=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        application_health_policy=None,  # type: Optional["_models.ApplicationHealthPolicy"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ApplicationHealth"
        """Gets the health of a Service Fabric application using the specified policy.

        Gets the health of a Service Fabric application. Use EventsHealthStateFilter to filter the
        collection of health events reported on the node based on the health state. Use
        ClusterHealthPolicies to override the health policies used to evaluate the health.

        :param application_id: The identity of the application. This is typically the full name of the
         application without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the application name is "fabric:/myapp/app1", the application identity would
         be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state.
         The possible values for this parameter include integer value of one of the following health
         states.
         Only events that match the filter are returned. All events are used to evaluate the aggregated
         health state.
         If not specified, all entries are returned. The state values are flag-based enumeration, so
         the value could be a combination of these values, obtained using the bitwise 'OR' operator. For
         example, If the provided value is 6 then all of the events with HealthState value of OK (2) and
         Warning (4) are returned.


         * Default - Default value. Matches any HealthState. The value is zero.
         * None - Filter that doesn't match any HealthState value. Used in order to return no results
         on a given collection of states. The value is 1.
         * Ok - Filter that matches input with HealthState value Ok. The value is 2.
         * Warning - Filter that matches input with HealthState value Warning. The value is 4.
         * Error - Filter that matches input with HealthState value Error. The value is 8.
         * All - Filter that matches input with any HealthState value. The value is 65535.
        :type events_health_state_filter: int
        :param deployed_applications_health_state_filter: Allows filtering of the deployed applications
         health state objects returned in the result of application health query based on their health
         state.
+ The possible values for this parameter include integer value of one of the following health + states. Only deployed applications that match the filter will be returned. + All deployed applications are used to evaluate the aggregated health state. If not specified, + all entries are returned. + The state values are flag-based enumeration, so the value could be a combination of these + values, obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of deployed applications with + HealthState value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_applications_health_state_filter: int + :param services_health_state_filter: Allows filtering of the services health state objects + returned in the result of services health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only services that match the filter are returned. All services are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of services with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. 
+ * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type services_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. 
+ :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_application_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_applications_health_state_filter is not None: + query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') + if services_health_state_filter is not None: + query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore + + def report_application_health( + self, + application_id, # type: str + health_information, # type: "_models.HealthInformation" + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Sends a health report on the Service Fabric application. + + Reports health state of the specified Service Fabric application. The report must contain the + information about the source of the health report and property on which it is reported. 
+ The report is sent to a Service Fabric gateway Application, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, get application health and check + that the report appears in the HealthEvents section. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. 
+ This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') 
+ + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} # type: ignore + + def start_application_upgrade( + self, + application_id, # type: str + application_upgrade_description, # type: "_models.ApplicationUpgradeDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Starts upgrading an application in the Service Fabric cluster. + + Validates the supplied application upgrade parameters and starts upgrading the application if + the parameters are valid. + Note, `ApplicationParameter + `_\ + s are not preserved across an application upgrade. + In order to preserve current application parameters, the user should get the parameters using + `GetApplicationInfo <./GetApplicationInfo.md>`_ operation first and pass them into the upgrade + API call as shown in the example. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
+ :type application_id: str + :param application_upgrade_description: Parameters for an application upgrade. + :type application_upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(application_upgrade_description, 
'ApplicationUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} # type: ignore + + def get_application_upgrade( + self, + application_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ApplicationUpgradeProgressInfo" + """Gets details for the latest upgrade performed on this application. + + Returns information about the state of the latest application upgrade along with details to aid + debugging application health issues. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationUpgradeProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationUpgradeProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationUpgradeProgressInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} # type: ignore + + def update_application_upgrade( + self, + application_id, # type: str + application_upgrade_update_description, # type: "_models.ApplicationUpgradeUpdateDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Updates an ongoing application upgrade in the Service Fabric cluster. + + Updates the parameters of an ongoing application upgrade from the ones specified at the time of + starting the application upgrade. This may be required to mitigate stuck application upgrades + due to incorrect parameters or issues in the application to make progress. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param application_upgrade_update_description: Parameters for updating an existing application + upgrade. + :type application_upgrade_update_description: ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} # type: ignore + + def resume_application_upgrade( + self, + application_id, # type: str + upgrade_domain_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Resumes upgrading an application in the Service Fabric cluster. + + Resumes an unmonitored manual Service Fabric application upgrade. Service Fabric upgrades one + upgrade domain at a time. For unmonitored manual upgrades, after Service Fabric finishes an + upgrade domain, it waits for you to call this API before proceeding to the next upgrade domain. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param upgrade_domain_name: The name of the upgrade domain in which to resume the upgrade. + :type upgrade_domain_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _resume_application_upgrade_description = _models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.resume_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code 
not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} # type: ignore + + def rollback_application_upgrade( + self, + application_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Starts rolling back the currently on-going upgrade of an application in the Service Fabric cluster. + + Starts rolling back the current application upgrade to the previous version. This API can only + be used to roll back the current in-progress upgrade that is rolling forward to new version. If + the application is not currently being upgraded use StartApplicationUpgrade API to upgrade it + to desired version, including rolling back to a previous version. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.rollback_application_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} # type: ignore + + def get_deployed_application_info_list( + self, + node_name, # type: str + timeout=60, # type: Optional[int] + 
include_health_state=False, # type: Optional[bool] + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedDeployedApplicationInfoList" + """Gets the list of applications deployed on a Service Fabric node. + + Gets the list of applications deployed on a Service Fabric node. The results do not include + information about deployed system applications unless explicitly queried for by ID. Results + encompass deployed applications in active, activating, and downloading states. This query + requires that the node name corresponds to a node on the cluster. The query fails if the + provided node name does not point to any active Service Fabric nodes on the cluster. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param include_health_state: Include the health state of an entity. + If this parameter is false or not specified, then the health state returned is "Unknown". + When set to true, the query goes in parallel to the node and the health system service before + the results are merged. + As a result, the query is more expensive and may take a longer time. + :type include_health_state: bool + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedDeployedApplicationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedDeployedApplicationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if include_health_state is not None: + query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedDeployedApplicationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} # type: ignore + + def get_deployed_application_info( + self, + node_name, # type: str + application_id, # type: str + timeout=60, # type: Optional[int] + include_health_state=False, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.DeployedApplicationInfo"] + """Gets the information about an application deployed on a Service Fabric node. + + This query returns system application information if the application ID provided is for system + application. Results encompass deployed applications in active, activating, and downloading + states. This query requires that the node name corresponds to a node on the cluster. The query + fails if the provided node name does not point to any active Service Fabric nodes on the + cluster. + + :param node_name: The name of the node. 
+ :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param include_health_state: Include the health state of an entity. + If this parameter is false or not specified, then the health state returned is "Unknown". + When set to true, the query goes in parallel to the node and the health system service before + the results are merged. + As a result, the query is more expensive and may take a longer time. 
+ :type include_health_state: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedApplicationInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedApplicationInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DeployedApplicationInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if include_health_state is not None: + query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DeployedApplicationInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} # type: ignore + + def get_deployed_application_health( + self, + node_name, # type: str + application_id, # type: str + events_health_state_filter=0, # type: Optional[int] + deployed_service_packages_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.DeployedApplicationHealth" + """Gets the information about health of an application deployed on a Service Fabric node. + + Gets the information about health of an application deployed on a Service Fabric node. Use + EventsHealthStateFilter to optionally filter for the collection of HealthEvent objects reported + on the deployed application based on health state. Use DeployedServicePackagesHealthStateFilter + to optionally filter for DeployedServicePackageHealth children based on health state. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. 
+ The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service + package health state objects returned in the result of deployed application health query based + on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only deployed service packages that match the filter are returned. All deployed service + packages are used to evaluate the aggregated health state of the deployed application. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value can be a combination of these + values, obtained using the bitwise 'OR' operator. + For example, if the provided value is 6 then health state of service packages with HealthState + value of OK (2) and Warning (4) are returned. + + + * Default - Default value. 
Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_service_packages_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_service_packages_health_state_filter is not None: + query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore + + def get_deployed_application_health_using_policy( + self, + node_name, # type: str + application_id, # type: str + events_health_state_filter=0, # type: Optional[int] + deployed_service_packages_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] + **kwargs # type: Any + ): + # type: (...) -> "_models.DeployedApplicationHealth" + """Gets the information about health of an application deployed on a Service Fabric node using the specified policy. + + Gets the information about health of an application deployed on a Service Fabric node using the + specified policy. Use EventsHealthStateFilter to optionally filter for the collection of + HealthEvent objects reported on the deployed application based on health state. Use + DeployedServicePackagesHealthStateFilter to optionally filter for DeployedServicePackageHealth + children based on health state. Use ApplicationHealthPolicy to optionally override the health + policies used to evaluate the health. 
This API only uses 'ConsiderWarningAsError' field of the + ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the + deployed application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. 
+ :type events_health_state_filter: int + :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service + package health state objects returned in the result of deployed application health query based + on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only deployed service packages that match the filter are returned. All deployed service + packages are used to evaluate the aggregated health state of the deployed application. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value can be a combination of these + values, obtained using the bitwise 'OR' operator. + For example, if the provided value is 6 then health state of service packages with HealthState + value of OK (2) and Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type deployed_service_packages_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedApplicationHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedApplicationHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_deployed_application_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_service_packages_health_state_filter is not None: + query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", 
deployed_service_packages_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore + + def report_deployed_application_health( + self, + node_name, # type: str + application_id, # type: str + health_information, # type: "_models.HealthInformation" + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> None + """Sends a health report on the Service Fabric application deployed on a Service Fabric node. + + Reports health state of the application deployed on a Service Fabric node. The report must + contain the information about the source of the health report and property on which it is + reported. + The report is sent to a Service Fabric gateway Service, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, get deployed application health and + check that the report appears in the HealthEvents section. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. 
+ Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_deployed_application_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = 
self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} # type: ignore + + def get_application_manifest( + self, + application_type_name, # type: str + application_type_version, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ApplicationTypeManifest" + """Gets the manifest describing an application type. + + The response contains the application manifest XML as a string. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationTypeManifest, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationTypeManifest + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationTypeManifest"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_manifest.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationTypeManifest', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} # type: ignore + + def get_service_info_list( + self, + application_id, # type: str + service_type_name=None, # type: Optional[str] + continuation_token_parameter=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedServiceInfoList" + """Gets the information about all services belonging to the application specified by the application ID. + + Returns the information about all services belonging to the application specified by the + application ID. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_type_name: The service type name used to filter the services to query for. + :type service_type_name: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServiceInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServiceInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if service_type_name is not None: + query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, 
query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedServiceInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} # type: ignore + + def get_service_info( + self, + application_id, # type: str + service_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.ServiceInfo"] + """Gets the information about the specific service belonging to the Service Fabric application. + + Returns the information about the specified service belonging to the specified Service Fabric + application. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. 
+ :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_info.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_info.metadata = {'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} # type: ignore + + def get_application_name_info( + self, + service_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ApplicationNameInfo" + """Gets the name of the Service Fabric application for a service. + + Gets the name of the application for the specified service. A 404 + FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the provided service ID + does not exist. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ApplicationNameInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ApplicationNameInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationNameInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_name_info.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ApplicationNameInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_name_info.metadata = {'url': 
'/Services/{serviceId}/$/GetApplicationName'} # type: ignore + + def create_service( + self, + application_id, # type: str + service_description, # type: "_models.ServiceDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Creates the specified Service Fabric service. + + This api allows creating a new Service Fabric stateless or stateful service under a specified + Service Fabric application. The description for creating the service includes partitioning + information and optional properties for placement and load balancing. Some of the properties + can later be modified using ``UpdateService`` API. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_description: The information necessary to create a service. + :type service_description: ~azure.servicefabric.models.ServiceDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_service.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(service_description, 'ServiceDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + 
raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} # type: ignore + + def create_service_from_template( + self, + application_id, # type: str + service_from_template_description, # type: "_models.ServiceFromTemplateDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Creates a Service Fabric service from the service template. + + Creates a Service Fabric service from the service template defined in the application manifest. + A service template contains the properties that will be same for the service instance of the + same type. The API allows overriding the properties that are usually different for different + services of the same service type. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_from_template_description: Describes the service that needs to be created from + the template defined in the application manifest. + :type service_from_template_description: ~azure.servicefabric.models.ServiceFromTemplateDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_service_from_template.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} # type: ignore + + def delete_service( + self, + service_id, # type: str + force_remove=None, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes an existing Service Fabric service. + + A service must be created before it can be deleted. By default, Service Fabric will try to + close service replicas in a graceful manner and then delete the service. However, if the + service is having issues closing the replica gracefully, the delete operation may take a long + time or get stuck. Use the optional ForceRemove flag to skip the graceful close sequence and + forcefully delete the service. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param force_remove: Remove a Service Fabric application or service forcefully without going + through the graceful shutdown sequence. This parameter can be used to forcefully delete an + application or service for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_service.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} # type: ignore + + def update_service( + self, + service_id, # 
type: str + service_update_description, # type: "_models.ServiceUpdateDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Updates a Service Fabric service using the specified update description. + + This API allows updating properties of a running Service Fabric service. The set of properties + that can be updated are a subset of the properties that were specified at the time of creating + the service. The current set of properties can be obtained using ``GetServiceDescription`` API. + Note that updating the properties of a running service is different than upgrading your + application using ``StartApplicationUpgrade`` API. The upgrade is a long running background + operation that involves moving the application from one version to another, one upgrade domain + at a time, whereas update applies the new properties immediately to the service. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_update_description: The information necessary to update a service. + :type service_update_description: ~azure.servicefabric.models.ServiceUpdateDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_service.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + 
raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} # type: ignore + + def get_service_description( + self, + service_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceDescription" + """Gets the description of an existing Service Fabric service. + + Gets the description of an existing Service Fabric service. A service must be created before + its description can be obtained. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_description.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_description.metadata = {'url': 
'/Services/{serviceId}/$/GetDescription'} # type: ignore + + def get_service_health( + self, + service_id, # type: str + events_health_state_filter=0, # type: Optional[int] + partitions_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceHealth" + """Gets the health of the specified Service Fabric service. + + Gets the health information of the specified service. + Use EventsHealthStateFilter to filter the collection of health events reported on the service + based on the health state. + Use PartitionsHealthStateFilter to filter the collection of partitions returned. + If you specify a service that does not exist in the health store, this request returns an + error. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. 
+ * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param partitions_health_state_filter: Allows filtering of the partitions health state objects + returned in the result of service health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only partitions that match the filter are returned. All partitions are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these value + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of partitions with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type partitions_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. 
+ The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_health.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if partitions_health_state_filter is not None: + query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore + + def get_service_health_using_policy( + self, + service_id, # type: str + events_health_state_filter=0, # type: Optional[int] + partitions_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceHealth" + """Gets the health of the specified Service Fabric service, by using the specified health policy. + + Gets the health information of the specified service. + If the application health policy is specified, the health evaluation uses it to get the + aggregated health state. + If the policy is not specified, the health evaluation uses the application health policy + defined in the application manifest, or the default health policy, if no policy is defined in + the manifest. + Use EventsHealthStateFilter to filter the collection of health events reported on the service + based on the health state. 
+ Use PartitionsHealthStateFilter to filter the collection of partitions returned. + If you specify a service that does not exist in the health store, this request returns an + error. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. 
+ :type events_health_state_filter: int + :param partitions_health_state_filter: Allows filtering of the partitions health state objects + returned in the result of service health query based on their health state. + The possible values for this parameter include integer value of one of the following health + states. + Only partitions that match the filter are returned. All partitions are used to evaluate the + aggregated health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these value + obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health + state of partitions with HealthState value of OK (2) and Warning (4) will be returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type partitions_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_service_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if partitions_health_state_filter is not None: + query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore + + def report_service_health( + self, + service_id, # type: str + health_information, # type: "_models.HealthInformation" + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Sends a health report on the Service Fabric service. + + Reports health state of the specified Service Fabric service. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway Service, which forwards to the health store. 
+ The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetServiceHealth and check that + the report appears in the HealthEvents section. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. 
+ This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_service_health.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'} # type: ignore + + def resolve_service( + self, + service_id, # type: str + partition_key_type=None, # type: Optional[int] + partition_key_value=None, # type: Optional[str] + previous_rsp_version=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ResolvedServicePartition" + """Resolve a Service Fabric partition. + + Resolve a Service Fabric service partition to get the endpoints of the service replicas. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_key_type: Key type for the partition. This parameter is required if the + partition scheme for the service is Int64Range or Named. The possible values are following. + + + * None (1) - Indicates that the PartitionKeyValue parameter is not specified. 
This is valid + for the partitions with partitioning scheme as Singleton. This is the default value. The value + is 1. + * Int64Range (2) - Indicates that the PartitionKeyValue parameter is an int64 partition key. + This is valid for the partitions with partitioning scheme as Int64Range. The value is 2. + * Named (3) - Indicates that the PartitionKeyValue parameter is a name of the partition. This + is valid for the partitions with partitioning scheme as Named. The value is 3. + :type partition_key_type: int + :param partition_key_value: Partition key. This is required if the partition scheme for the + service is Int64Range or Named. + This is not the partition ID, but rather, either the integer key value, or the name of the + partition ID. + For example, if your service is using ranged partitions from 0 to 10, then they + PartitionKeyValue would be an + integer in that range. Query service description to see the range or name. + :type partition_key_value: str + :param previous_rsp_version: The value in the Version field of the response that was received + previously. This is required if the user knows that the result that was gotten previously is + stale. + :type previous_rsp_version: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ResolvedServicePartition, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ResolvedServicePartition + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ResolvedServicePartition"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resolve_service.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_key_type is not None: + query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int') + if partition_key_value is not None: + query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True) + if previous_rsp_version is not None: + query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ResolvedServicePartition', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'} # type: ignore + + def get_unplaced_replica_information( + self, + service_id, # type: str + partition_id=None, # type: Optional[str] + only_query_primaries=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.UnplacedReplicaInformation" + """Gets the information about unplaced replica of the service. + + Returns the information about the unplaced replicas of the service. + If PartitionId is specified, then result will contain information only about unplaced replicas + for that partition. + If PartitionId is not specified, then result will contain information about unplaced replicas + for all partitions of that service. + If OnlyQueryPrimaries is set to true, then result will contain information only about primary + replicas, and will ignore unplaced secondary replicas. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. 
+ :type partition_id: str + :param only_query_primaries: Indicates that unplaced replica information will be queries only + for primary replicas. + :type only_query_primaries: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UnplacedReplicaInformation, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UnplacedReplicaInformation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UnplacedReplicaInformation"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_unplaced_replica_information.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_id is not None: + query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') + if only_query_primaries is not None: + query_parameters['OnlyQueryPrimaries'] = self._serialize.query("only_query_primaries", only_query_primaries, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] 
= self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UnplacedReplicaInformation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_unplaced_replica_information.metadata = {'url': '/Services/{serviceId}/$/GetUnplacedReplicaInformation'} # type: ignore + + def get_loaded_partition_info_list( + self, + metric_name, # type: str + service_name=None, # type: Optional[str] + ordering=None, # type: Optional[Union[str, "_models.Ordering"]] + max_results=0, # type: Optional[int] + continuation_token_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.LoadedPartitionInformationResultList" + """Gets ordered list of partitions. + + Retrieves partitions which are most/least loaded according to specified metric. + + :param metric_name: Name of the metric based on which to get ordered list of partitions. + :type metric_name: str + :param service_name: The name of a service. + :type service_name: str + :param ordering: Ordering of partitions' load. + :type ordering: str or ~azure.servicefabric.models.Ordering + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. 
If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: LoadedPartitionInformationResultList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.LoadedPartitionInformationResultList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadedPartitionInformationResultList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_loaded_partition_info_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['MetricName'] = self._serialize.query("metric_name", metric_name, 'str') + if service_name is not None: + query_parameters['ServiceName'] = self._serialize.query("service_name", service_name, 'str') + if ordering is not None: + query_parameters['Ordering'] = self._serialize.query("ordering", ordering, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", 
max_results, 'long', minimum=0) + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('LoadedPartitionInformationResultList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_loaded_partition_info_list.metadata = {'url': '/$/GetLoadedPartitionInfoList'} # type: ignore + + def get_partition_info_list( + self, + service_id, # type: str + continuation_token_parameter=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedServicePartitionInfoList" + """Gets the list of partitions of a Service Fabric service. + + The response includes the partition ID, partitioning scheme information, keys supported by the + partition, status, health, and other details about the partition. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. 
+ :type service_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedServicePartitionInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServicePartitionInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", 
continuation_token_parameter, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedServicePartitionInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_info_list.metadata = {'url': '/Services/{serviceId}/$/GetPartitions'} # type: ignore + + def get_partition_info( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.ServicePartitionInfo"] + """Gets the information about a Service Fabric partition. + + Gets the information about the specified partition. The response includes the partition ID, + partitioning scheme information, keys supported by the partition, status, health, and other + details about the partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServicePartitionInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServicePartitionInfo or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServicePartitionInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_info.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServicePartitionInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + 
return deserialized + get_partition_info.metadata = {'url': '/Partitions/{partitionId}'} # type: ignore + + def get_service_name_info( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ServiceNameInfo" + """Gets the name of the Service Fabric service for a partition. + + Gets name of the service for the specified partition. A 404 error is returned if the partition + ID does not exist in the cluster. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ServiceNameInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ServiceNameInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceNameInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_name_info.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ServiceNameInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_name_info.metadata = {'url': '/Partitions/{partitionId}/$/GetServiceName'} # type: ignore + + def get_partition_health( + self, + partition_id, # type: str + events_health_state_filter=0, # type: Optional[int] + replicas_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PartitionHealth" + """Gets the health of the specified Service Fabric partition. + + Use EventsHealthStateFilter to filter the collection of health events reported on the service + based on the health state. + Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the + partition. + If you specify a partition that does not exist in the health store, this request returns an + error. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. 
All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState + objects on the partition. The value can be obtained from members or bitwise operations on + members of HealthStateFilter. Only replicas that match the filter will be returned. All + replicas will be used to evaluate the aggregated health state. If not specified, all entries + will be returned.The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. For example, If the provided + value is 6 then all of the events with HealthState value of OK (2) and Warning (4) will be + returned. The possible values for this parameter include integer value of one of the following + health states. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. 
+ * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type replicas_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] 
= self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if replicas_health_state_filter is not None: + query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} # type: ignore + + def get_partition_health_using_policy( + self, + partition_id, # type: str + events_health_state_filter=0, # type: Optional[int] + replicas_health_state_filter=0, # type: Optional[int] + exclude_health_statistics=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] + **kwargs # type: Any + ): + # type: (...) 
-> "_models.PartitionHealth" + """Gets the health of the specified Service Fabric partition, by using the specified health policy. + + Gets the health information of the specified partition. + If the application health policy is specified, the health evaluation uses it to get the + aggregated health state. + If the policy is not specified, the health evaluation uses the application health policy + defined in the application manifest, or the default health policy, if no policy is defined in + the manifest. + Use EventsHealthStateFilter to filter the collection of health events reported on the partition + based on the health state. + Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the + partition. Use ApplicationHealthPolicy in the POST body to override the health policies used to + evaluate the health. + If you specify a partition that does not exist in the health store, this request returns an + error. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
+ * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState + objects on the partition. The value can be obtained from members or bitwise operations on + members of HealthStateFilter. Only replicas that match the filter will be returned. All + replicas will be used to evaluate the aggregated health state. If not specified, all entries + will be returned.The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. For example, If the provided + value is 6 then all of the events with HealthState value of OK (2) and Warning (4) will be + returned. The possible values for this parameter include integer value of one of the following + health states. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type replicas_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health statistics should be returned as + part of the query result. False by default. + The statistics show the number of children entities in health state Ok, Warning, and Error. 
+ :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_partition_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if replicas_health_state_filter is not None: + query_parameters['ReplicasHealthStateFilter'] = 
self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} # type: ignore + + def report_partition_health( + self, + partition_id, # type: str + health_information, # type: "_models.HealthInformation" + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> None + """Sends a health report on the Service Fabric partition. + + Reports health state of the specified Service Fabric partition. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway Partition, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetPartitionHealth and check + that the report appears in the HealthEvents section. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. 
+ This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_partition_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'} # type: ignore + + def get_partition_load_information( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PartitionLoadInformation" + """Gets the load information of the specified Service Fabric partition. + + Returns information about the load of a specified partition. + The response includes a list of load reports for a Service Fabric partition. + Each report includes the load metric name, value, and last reported time in UTC. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionLoadInformation, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionLoadInformation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionLoadInformation"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_load_information.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PartitionLoadInformation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} # type: ignore + + def reset_partition_load( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Resets the current load of a Service Fabric partition. + + Resets the current load of a Service Fabric partition to the default load for the service. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.reset_partition_load.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + reset_partition_load.metadata = {'url': '/Partitions/{partitionId}/$/ResetLoad'} # type: ignore + + def recover_partition( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Indicates to the Service Fabric cluster that it should attempt to recover a specific partition that is currently stuck in quorum loss. + + This operation should only be performed if it is known that the replicas that are down cannot + be recovered. Incorrect use of this API can cause potential data loss. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_partition.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'} # type: ignore + + def recover_service_partitions( + self, + service_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> None + """Indicates to the Service Fabric cluster that it should attempt to recover the specified service that is currently stuck in quorum loss. + + Indicates to the Service Fabric cluster that it should attempt to recover the specified service + that is currently stuck in quorum loss. This operation should only be performed if it is known + that the replicas that are down cannot be recovered. Incorrect use of this API can cause + potential data loss. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_service_partitions.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} # type: ignore + + def recover_system_partitions( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> None + """Indicates to the Service Fabric cluster that it should attempt to recover the system services that are currently stuck in quorum loss. + + Indicates to the Service Fabric cluster that it should attempt to recover the system services + that are currently stuck in quorum loss. This operation should only be performed if it is known + that the replicas that are down cannot be recovered. Incorrect use of this API can cause + potential data loss. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_system_partitions.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} # type: ignore + + def recover_all_partitions( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Indicates to the Service Fabric cluster that it should attempt to recover any services (including system services) which are currently stuck in quorum loss. + + This operation should only be performed if it is known that the replicas that are down cannot + be recovered. Incorrect use of this API can cause potential data loss. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.recover_all_partitions.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} # type: ignore + + def move_primary_replica( + self, + partition_id, # type: str + node_name=None, # type: Optional[str] + ignore_constraints=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Moves the primary replica of a partition of a stateful service. 
+ + This command moves the primary replica of a partition of a stateful service, respecting all + constraints. + If NodeName parameter is specified, primary will be moved to the specified node (if constraints + allow it). + If NodeName parameter is not specified, primary replica will be moved to a random node in the + cluster. + If IgnoreConstraints parameter is specified and set to true, then primary will be moved + regardless of the constraints. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param node_name: The name of the node. + :type node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or instance. If this + parameter is not specified, all constraints are honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.move_primary_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if node_name is not None: + query_parameters['NodeName'] = self._serialize.query("node_name", node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, 
None, {}) + + move_primary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MovePrimaryReplica'} # type: ignore + + def move_secondary_replica( + self, + partition_id, # type: str + current_node_name, # type: str + new_node_name=None, # type: Optional[str] + ignore_constraints=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Moves the secondary replica of a partition of a stateful service. + + This command moves the secondary replica of a partition of a stateful service, respecting all + constraints. + CurrentNodeName parameter must be specified to identify the replica that is moved. + Source node name must be specified, but new node name can be omitted, and in that case replica + is moved to a random node. + If IgnoreConstraints parameter is specified and set to true, then secondary will be moved + regardless of the constraints. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param current_node_name: The name of the source node for secondary replica move. + :type current_node_name: str + :param new_node_name: The name of the target node for secondary replica or instance move. If + not specified, replica or instance is moved to a random node. + :type new_node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or instance. If this + parameter is not specified, all constraints are honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.move_secondary_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') + if new_node_name is not None: + query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + move_secondary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MoveSecondaryReplica'} # type: ignore + + def update_partition_load( + self, + partition_metric_load_description_list, # type: List["_models.PartitionMetricLoadDescription"] + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedUpdatePartitionLoadResultList" + """Update the loads of provided partitions for specific metrics. + + Updates the load value and predicted load value for all the partitions provided for specified + metrics. + + :param partition_metric_load_description_list: Description of updating load for list of + partitions. + :type partition_metric_load_description_list: list[~azure.servicefabric.models.PartitionMetricLoadDescription] + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. 
If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedUpdatePartitionLoadResultList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedUpdatePartitionLoadResultList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedUpdatePartitionLoadResultList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_partition_load.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(partition_metric_load_description_list, '[PartitionMetricLoadDescription]') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedUpdatePartitionLoadResultList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update_partition_load.metadata = {'url': '/$/UpdatePartitionLoad'} # type: ignore + + def move_instance( + self, + service_id, # type: str + partition_id, # type: str + current_node_name=None, # type: Optional[str] + new_node_name=None, # type: Optional[str] + ignore_constraints=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Moves the instance of a partition of a stateless service. + + This command moves the instance of a partition of a stateless service, respecting all + constraints. + Partition id and service name must be specified to be able to move the instance. + CurrentNodeName when specified identifies the instance that is moved. If not specified, random + instance will be moved + New node name can be omitted, and in that case instance is moved to a random node. + If IgnoreConstraints parameter is specified and set to true, then instance will be moved + regardless of the constraints. + + :param service_id: The identity of the service. 
This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param current_node_name: The name of the source node for instance move. If not specified, + instance is moved from a random node. + :type current_node_name: str + :param new_node_name: The name of the target node for secondary replica or instance move. If + not specified, replica or instance is moved to a random node. + :type new_node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or instance. If this + parameter is not specified, all constraints are honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.move_instance.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if current_node_name is not None: + query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') + if new_node_name is not None: + query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + move_instance.metadata = {'url': '/Services/{serviceId}/$/GetPartitions/{partitionId}/$/MoveInstance'} # type: ignore + + def create_repair_task( + self, + repair_task, # type: "_models.RepairTask" + **kwargs # type: Any + ): + # type: (...) -> "_models.RepairTaskUpdateInfo" + """Creates a new repair task. + + For clusters that have the Repair Manager Service configured, + this API provides a way to create repair tasks that run automatically or manually. + For repair tasks that run automatically, an appropriate repair executor + must be running for each repair action to run automatically. + These are currently only available in specially-configured Azure Cloud Services. + + To create a manual repair task, provide the set of impacted node names and the + expected impact. When the state of the created repair task changes to approved, + you can safely perform repair actions on those nodes. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param repair_task: Describes the repair task to be created or updated. 
+ :type repair_task: ~azure.servicefabric.models.RepairTask + :keyword callable cls: A custom type or function that will be passed the direct response + :return: RepairTaskUpdateInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_repair_task.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(repair_task, 'RepairTask') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return 
    def cancel_repair_task(
        self,
        repair_task_cancel_description,  # type: "_models.RepairTaskCancelDescription"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RepairTaskUpdateInfo"
        """Requests the cancellation of the given repair task.

        This API supports the Service Fabric platform; it is not meant to be used directly from your
        code.

        :param repair_task_cancel_description: Describes the repair task to be cancelled.
        :type repair_task_cancel_description: ~azure.servicefabric.models.RepairTaskCancelDescription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RepairTaskUpdateInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RepairTaskUpdateInfo"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.cancel_repair_task.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the cancel description as the POST body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; failures to
            # deserialize are tolerated ("failsafe") and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'}  # type: ignore

    def delete_repair_task(
        self,
        task_id,  # type: str
        version=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes a completed repair task.

        This API supports the Service Fabric platform; it is not meant to be used directly from your
        code.

        :param task_id: The ID of the completed repair task to be deleted.
        :type task_id: str
        :param version: The current version number of the repair task. If non-zero, then the request
         will only succeed if this value matches the actual current version of the repair task. If zero,
         then no version check is performed.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened parameters are folded into the body model the service expects.
        _repair_task_delete_description = _models.RepairTaskDeleteDescription(task_id=task_id, version=version)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.delete_repair_task.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_repair_task_delete_description, 'RepairTaskDeleteDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # No body on success; only invoke the optional response hook.
        if cls:
            return cls(pipeline_response, None, {})

    delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'}  # type: ignore
    def get_repair_task_list(
        self,
        task_id_filter=None,  # type: Optional[str]
        state_filter=None,  # type: Optional[int]
        executor_filter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.RepairTask"]
        """Gets a list of repair tasks matching the given filters.

        This API supports the Service Fabric platform; it is not meant to be used directly from your
        code.

        :param task_id_filter: The repair task ID prefix to be matched.
        :type task_id_filter: str
        :param state_filter: A bitwise-OR of the following values, specifying which task states should
         be included in the result list.


         * 1 - Created
         * 2 - Claimed
         * 4 - Preparing
         * 8 - Approved
         * 16 - Executing
         * 32 - Restoring
         * 64 - Completed.
        :type state_filter: int
        :param executor_filter: The name of the repair executor whose claimed tasks should be included
         in the list.
        :type executor_filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of RepairTask, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.RepairTask]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.RepairTask"]]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_repair_task_list.metadata['url']  # type: ignore

        # Construct parameters (filters are only sent when provided).
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if task_id_filter is not None:
            query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str')
        if state_filter is not None:
            query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int')
        if executor_filter is not None:
            query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # '[RepairTask]' tells the deserializer to expect a JSON array of RepairTask.
        deserialized = self._deserialize('[RepairTask]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'}  # type: ignore

    def force_approve_repair_task(
        self,
        task_id,  # type: str
        version=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RepairTaskUpdateInfo"
        """Forces the approval of the given repair task.

        This API supports the Service Fabric platform; it is not meant to be used directly from your
        code.

        :param task_id: The ID of the repair task.
        :type task_id: str
        :param version: The current version number of the repair task. If non-zero, then the request
         will only succeed if this value matches the actual current version of the repair task. If zero,
         then no version check is performed.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RepairTaskUpdateInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RepairTaskUpdateInfo"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened parameters are folded into the body model the service expects.
        _repair_task_approve_description = _models.RepairTaskApproveDescription(task_id=task_id, version=version)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.force_approve_repair_task.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_repair_task_approve_description, 'RepairTaskApproveDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'}  # type: ignore
    def update_repair_task_health_policy(
        self,
        repair_task_update_health_policy_description,  # type: "_models.RepairTaskUpdateHealthPolicyDescription"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RepairTaskUpdateInfo"
        """Updates the health policy of the given repair task.

        This API supports the Service Fabric platform; it is not meant to be used directly from your
        code.

        :param repair_task_update_health_policy_description: Describes the repair task healthy policy
         to be updated.
        :type repair_task_update_health_policy_description: ~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RepairTaskUpdateInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RepairTaskUpdateInfo"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_repair_task_health_policy.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the health-policy description as the POST body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'}  # type: ignore

    def update_repair_execution_state(
        self,
        repair_task,  # type: "_models.RepairTask"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RepairTaskUpdateInfo"
        """Updates the execution state of a repair task.

        This API supports the Service Fabric platform; it is not meant to be used directly from your
        code.

        :param repair_task: Describes the repair task to be created or updated.
        :type repair_task: ~azure.servicefabric.models.RepairTask
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RepairTaskUpdateInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RepairTaskUpdateInfo"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_repair_execution_state.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the full RepairTask as the POST body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(repair_task, 'RepairTask')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'}  # type: ignore
    def get_replica_info_list(
        self,
        partition_id,  # type: str
        continuation_token_parameter=None,  # type: Optional[str]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedReplicaInfoList"
        """Gets the information about replicas of a Service Fabric service partition.

        The GetReplicas endpoint returns information about the replicas of the specified partition. The
        response includes the ID, role, status, health, node name, uptime, and other details about the
        replica.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedReplicaInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedReplicaInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedReplicaInfoList"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (partitionId is substituted into the path; skip_quote
        # leaves it unescaped).
        url = self.get_replica_info_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if continuation_token_parameter is not None:
            # skip_quote: per the docstring, the continuation token must not be URL encoded.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if timeout is not None:
            # Serializer enforces the service's allowed range: 1..4294967295 seconds.
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedReplicaInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'}  # type: ignore
    def get_replica_info(
        self,
        partition_id,  # type: str
        replica_id,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ReplicaInfo"]
        """Gets the information about a replica of a Service Fabric partition.

        The response includes the ID, role, status, health, node name, uptime, and other details about
        the replica.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param replica_id: The identifier of the replica.
        :type replica_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ReplicaInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ReplicaInfo or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ReplicaInfo"]]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (path segments are substituted unescaped via skip_quote).
        url = self.get_replica_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
            'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Serializer enforces the service's allowed range: 1..4294967295 seconds.
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 (no content) is a valid outcome here -> the method returns None.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ReplicaInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'}  # type: ignore
    def get_replica_health(
        self,
        partition_id,  # type: str
        replica_id,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ReplicaHealth"
        """Gets the health of a Service Fabric stateful service replica or stateless service instance.

        Gets the health of a Service Fabric replica.
        Use EventsHealthStateFilter to filter the collection of health events reported on the replica
        based on the health state.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param replica_id: The identifier of the replica.
        :type replica_id: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state.
         The possible values for this parameter include integer value of one of the following health
         states.
         Only events that match the filter are returned. All events are used to evaluate the aggregated
         health state.
         If not specified, all entries are returned. The state values are flag-based enumeration, so
         the value could be a combination of these values, obtained using the bitwise 'OR' operator. For
         example, If the provided value is 6 then all of the events with HealthState value of OK (2) and
         Warning (4) are returned.


         * Default - Default value. Matches any HealthState. The value is zero.
         * None - Filter that doesn't match any HealthState value. Used in order to return no results
         on a given collection of states. The value is 1.
         * Ok - Filter that matches input with HealthState value Ok. The value is 2.
         * Warning - Filter that matches input with HealthState value Warning. The value is 4.
         * Error - Filter that matches input with HealthState value Error. The value is 8.
         * All - Filter that matches input with any HealthState value. The value is 65535.
        :type events_health_state_filter: int
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ReplicaHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ReplicaHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicaHealth"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (path segments are substituted unescaped via skip_quote).
        url = self.get_replica_health.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
            'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if timeout is not None:
            # Serializer enforces the service's allowed range: 1..4294967295 seconds.
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ReplicaHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'}  # type: ignore
    def get_replica_health_using_policy(
        self,
        partition_id,  # type: str
        replica_id,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        application_health_policy=None,  # type: Optional["_models.ApplicationHealthPolicy"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ReplicaHealth"
        """Gets the health of a Service Fabric stateful service replica or stateless service instance using the specified policy.

        Gets the health of a Service Fabric stateful service replica or stateless service instance.
        Use EventsHealthStateFilter to filter the collection of health events reported on the cluster
        based on the health state.
        Use ApplicationHealthPolicy to optionally override the health policies used to evaluate the
        health. This API only uses 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The
        rest of the fields are ignored while evaluating the health of the replica.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param replica_id: The identifier of the replica.
        :type replica_id: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state.
         The possible values for this parameter include integer value of one of the following health
         states.
         Only events that match the filter are returned. All events are used to evaluate the aggregated
         health state.
         If not specified, all entries are returned. The state values are flag-based enumeration, so
         the value could be a combination of these values, obtained using the bitwise 'OR' operator. For
         example, If the provided value is 6 then all of the events with HealthState value of OK (2) and
         Warning (4) are returned.


         * Default - Default value. Matches any HealthState. The value is zero.
         * None - Filter that doesn't match any HealthState value. Used in order to return no results
         on a given collection of states. The value is 1.
         * Ok - Filter that matches input with HealthState value Ok. The value is 2.
         * Warning - Filter that matches input with HealthState value Warning. The value is 4.
         * Error - Filter that matches input with HealthState value Error. The value is 8.
         * All - Filter that matches input with any HealthState value. The value is 65535.
        :type events_health_state_filter: int
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param application_health_policy: Describes the health policies used to evaluate the health of
         an application or one of its children.
         If not present, the health evaluation uses the health policy from application manifest or the
         default health policy.
        :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ReplicaHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ReplicaHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicaHealth"]
        # Default status-code -> exception mapping; callers may extend/override it
        # by passing an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL (path segments are substituted unescaped via skip_quote).
        url = self.get_replica_health_using_policy.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
            'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if timeout is not None:
            # Serializer enforces the service's allowed range: 1..4294967295 seconds.
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The POST body is optional: send the policy override if supplied,
        # otherwise an empty body (service falls back to the manifest/default policy).
        body_content_kwargs = {}  # type: Dict[str, Any]
        if application_health_policy is not None:
            body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Attempt to decode the service's FabricError body; deserialization
            # failures are tolerated and the raw response is raised.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ReplicaHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'}  # type: ignore
body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ReplicaHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore + + def report_replica_health( + self, + partition_id, # type: str + replica_id, # type: str + health_information, # type: "_models.HealthInformation" + service_kind="Stateful", # type: Union[str, "_models.ReplicaHealthReportServiceKind"] + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Sends a health report on the Service Fabric replica. + + Reports health state of the specified Service Fabric replica. The report must contain the + information about the source of the health report and property on which it is reported. + The report is sent to a Service Fabric gateway Replica, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, run GetReplicaHealth and check that + the report appears in the HealthEvents section. + + :param partition_id: The identity of the partition. 
+ :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param service_kind: The kind of service replica (Stateless or Stateful) for which the health + is being reported. Following are the possible values. + :type service_kind: str or ~azure.servicefabric.models.ReplicaHealthReportServiceKind + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. + If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_replica_health.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} # type: ignore + + def get_deployed_service_replica_info_list( + self, + node_name, # type: str + application_id, # type: str + partition_id=None, # type: Optional[str] + service_manifest_name=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Optional[List["_models.DeployedServiceReplicaInfo"]] + """Gets the list of replicas deployed on a Service Fabric node. + + Gets the list containing the information about replicas deployed on a Service Fabric node. The + information includes partition ID, replica ID, status of the replica, name of the service, name + of the service type, and other information. Use PartitionId or ServiceManifestName query + parameters to return information about the deployed replicas matching the specified values for + those parameters. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param partition_id: The identity of the partition. 
+ :type partition_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServiceReplicaInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo] or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceReplicaInfo"]]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_replica_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_id is not None: + query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServiceReplicaInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} # type: ignore + + def get_deployed_service_replica_detail_info( + self, + node_name, # type: str + partition_id, # type: str + replica_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.DeployedServiceReplicaDetailInfo" + """Gets the details of replica deployed on a Service Fabric node. + + Gets the details of the replica deployed on a Service Fabric node. The information includes + service kind, service name, current service operation, current service operation start date + time, partition ID, replica/instance ID, reported load, and other information. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_replica_detail_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} # type: ignore + + def get_deployed_service_replica_detail_info_by_partition_id( + self, + node_name, # type: str + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.DeployedServiceReplicaDetailInfo" + """Gets the details of replica deployed on a Service Fabric node. + + Gets the details of the replica deployed on a Service Fabric node. The information includes + service kind, service name, current service operation, current service operation start date + time, partition ID, replica/instance ID, reported load, and other information. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', 
pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'} # type: ignore + + def restart_replica( + self, + node_name, # type: str + partition_id, # type: str + replica_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Restarts a service replica of a persisted service running on a node. + + Restarts a service replica of a persisted service running on a node. Warning - There are no + safety checks performed when this API is used. Incorrect use of this API can lead to + availability loss for stateful services. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.restart_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restart_replica.metadata = {'url': 
'/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} # type: ignore + + def remove_replica( + self, + node_name, # type: str + partition_id, # type: str + replica_id, # type: str + force_remove=None, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Removes a service replica running on a node. + + This API simulates a Service Fabric replica failure by removing a replica from a Service Fabric + cluster. The removal closes the replica, transitions the replica to the role None, and then + removes all of the state information of the replica from the cluster. This API tests the + replica state removal path, and simulates the report fault permanent path through client APIs. + Warning - There are no safety checks performed when this API is used. Incorrect use of this API + can lead to data loss for stateful services. In addition, the forceRemove flag impacts all + other replicas hosted in the same process. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param force_remove: Remove a Service Fabric application or service forcefully without going + through the graceful shutdown sequence. This parameter can be used to forcefully delete an + application or service for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.remove_replica.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return 
cls(pipeline_response, None, {}) + + remove_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} # type: ignore + + def get_deployed_service_package_info_list( + self, + node_name, # type: str + application_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.DeployedServicePackageInfo"] + """Gets the list of service packages deployed on a Service Fabric node. + + Returns the information about the service packages deployed on a Service Fabric node for the + given application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServicePackageInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServicePackageInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) + + if 
cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} # type: ignore + + def get_deployed_service_package_info_list_by_name( + self, + node_name, # type: str + application_id, # type: str + service_package_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Optional[List["_models.DeployedServicePackageInfo"]] + """Gets the list of service packages deployed on a Service Fabric node matching exactly the specified name. + + Returns the information about the service packages deployed on a Service Fabric node for the + given application. These results are of service packages whose name match exactly the service + package name specified as the parameter. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedServicePackageInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServicePackageInfo"]]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_info_list_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise 
HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'} # type: ignore + + def get_deployed_service_package_health( + self, + node_name, # type: str + application_id, # type: str + service_package_name, # type: str + events_health_state_filter=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.DeployedServicePackageHealth" + """Gets the information about health of a service package for a specific application deployed on a Service Fabric node. + + Gets the information about health of a service package for a specific application deployed on a + Service Fabric node. Use EventsHealthStateFilter to optionally filter for the collection of + HealthEvent objects reported on the deployed service package based on health state. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. 
+ The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServicePackageHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore + + def get_deployed_service_package_health_using_policy( + self, + node_name, # type: str + application_id, # type: str + service_package_name, # type: str + events_health_state_filter=0, # type: Optional[int] + timeout=60, # type: Optional[int] + application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] + **kwargs # type: Any + ): + # type: (...) -> "_models.DeployedServicePackageHealth" + """Gets the information about health of service package for a specific application deployed on a Service Fabric node using the specified policy. + + Gets the information about health of a service package for a specific application deployed on a + Service Fabric node. using the specified policy. Use EventsHealthStateFilter to optionally + filter for the collection of HealthEvent objects reported on the deployed service package based + on health state. Use ApplicationHealthPolicy to optionally override the health policies used to + evaluate the health. This API only uses 'ConsiderWarningAsError' field of the + ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the + deployed service package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param events_health_state_filter: Allows filtering the collection of HealthEvent objects + returned based on health state. + The possible values for this parameter include integer value of one of the following health + states. + Only events that match the filter are returned. All events are used to evaluate the aggregated + health state. + If not specified, all entries are returned. The state values are flag-based enumeration, so + the value could be a combination of these values, obtained using the bitwise 'OR' operator. For + example, If the provided value is 6 then all of the events with HealthState value of OK (2) and + Warning (4) are returned. + + + * Default - Default value. Matches any HealthState. The value is zero. + * None - Filter that doesn't match any HealthState value. Used in order to return no results + on a given collection of states. The value is 1. + * Ok - Filter that matches input with HealthState value Ok. The value is 2. + * Warning - Filter that matches input with HealthState value Warning. The value is 4. + * Error - Filter that matches input with HealthState value Error. The value is 8. + * All - Filter that matches input with any HealthState value. The value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param application_health_policy: Describes the health policies used to evaluate the health of + an application or one of its children. + If not present, the health evaluation uses the health policy from application manifest or the + default health policy. + :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DeployedServicePackageHealth, or the result of cls(response) + :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.get_deployed_service_package_health_using_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # 
Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore + + def report_deployed_service_package_health( + self, + node_name, # type: str + application_id, # type: str + service_package_name, # type: str + health_information, # type: "_models.HealthInformation" + immediate=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Sends a health report on the Service Fabric deployed service package. + + Reports health state of the service package of the application deployed on a Service Fabric + node. The report must contain the information about the source of the health report and + property on which it is reported. 
+ The report is sent to a Service Fabric gateway Service, which forwards to the health store. + The report may be accepted by the gateway, but rejected by the health store after extra + validation. + For example, the health store may reject the report because of an invalid parameter, like a + stale sequence number. + To see whether the report was applied in the health store, get deployed service package health + and check that the report appears in the HealthEvents section. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_package_name: The name of the service package. + :type service_package_name: str + :param health_information: Describes the health information for the health report. This + information needs to be present in all of the health reports sent to the health manager. + :type health_information: ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be sent immediately. + A health report is sent to a Service Fabric gateway Application, which forwards to the health + store. + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health + store, regardless of the fabric client settings that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as possible. + Depending on timing and other conditions, sending the report may still fail, for example if + the HTTP Gateway is closed or the message doesn't reach the Gateway. 
+ If Immediate is set to false, the report is sent based on the health client settings from the + HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval + configuration. + This is the recommended setting because it allows the health client to optimize health + reporting messages to health store as well as health report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.report_deployed_service_package_health.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 
'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(health_information, 'HealthInformation') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} # type: ignore + + def deploy_service_package_to_node( + self, + node_name, # type: str + deploy_service_package_to_node_description, # type: "_models.DeployServicePackageToNodeDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Downloads all of the code packages associated with specified service manifest on the specified node. + + This API provides a way to download code packages including the container images on a specific + node outside of the normal application deployment and upgrade path. 
This is useful for the + large code packages and container images to be present on the node before the actual + application deployment and upgrade, thus significantly reducing the total time required for the + deployment or upgrade. + + :param node_name: The name of the node. + :type node_name: str + :param deploy_service_package_to_node_description: Describes information for deploying a + service package to a Service Fabric node. + :type deploy_service_package_to_node_description: ~azure.servicefabric.models.DeployServicePackageToNodeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.deploy_service_package_to_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} # type: ignore + + def get_deployed_code_package_info_list( + self, + node_name, # type: str + application_id, # type: str + service_manifest_name=None, # type: Optional[str] + code_package_name=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.DeployedCodePackageInfo"] + """Gets the list of code packages deployed on a Service Fabric node. + + Gets the list of code packages deployed on a Service Fabric node for the given application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. 
+ For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in service manifest registered as + part of an application type in a Service Fabric cluster. + :type code_package_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DeployedCodePackageInfo, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedCodePackageInfo"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_deployed_code_package_info_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + 
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if code_package_name is not None: + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[DeployedCodePackageInfo]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_deployed_code_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} # type: ignore + + def restart_deployed_code_package( + self, + node_name, # type: str + application_id, # type: str + restart_deployed_code_package_description, # type: "_models.RestartDeployedCodePackageDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Restarts a code package deployed on a Service Fabric node in a cluster. + + Restarts a code package deployed on a Service Fabric node in a cluster. This aborts the code + package process, which will restart all the user service replicas hosted in that process. + + :param node_name: The name of the node. 
+ :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param restart_deployed_code_package_description: Describes the deployed code package on + Service Fabric node to restart. + :type restart_deployed_code_package_description: ~azure.servicefabric.models.RestartDeployedCodePackageDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.restart_deployed_code_package.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", 
api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} # type: ignore + + def get_container_logs_deployed_on_node( + self, + node_name, # type: str + application_id, # type: str + service_manifest_name, # type: str + code_package_name, # type: str + tail=None, # type: Optional[str] + previous=False, # type: Optional[bool] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ContainerLogs" + """Gets the container logs for container deployed on a Service Fabric node. + + Gets the container logs for container deployed on a Service Fabric node for the given code + package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. 
This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in service manifest registered as + part of an application type in a Service Fabric cluster. + :type code_package_name: str + :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show + the complete logs. + :type tail: str + :param previous: Specifies whether to get container logs from exited/dead containers of the + code package instance. + :type previous: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ContainerLogs, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ContainerLogs + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_container_logs_deployed_on_node.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + if tail is not None: + query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') + if previous is not None: + query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ContainerLogs', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} # type: ignore + + def invoke_container_api( + self, + node_name, # type: str + application_id, # type: str + service_manifest_name, # type: str + code_package_name, # type: str + code_package_instance_id, # type: str + container_api_request_body, # type: "_models.ContainerApiRequestBody" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ContainerApiResponse" + """Invoke container API on a container deployed on a Service Fabric node. + + Invoke container API on a container deployed on a Service Fabric node for the given code + package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest registered as part of an + application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in service manifest registered as + part of an application type in a Service Fabric cluster. 
+ :type code_package_name: str + :param code_package_instance_id: ID that uniquely identifies a code package instance deployed + on a service fabric node. + :type code_package_instance_id: str + :param container_api_request_body: Parameters for making container API call. + :type container_api_request_body: ~azure.servicefabric.models.ContainerApiRequestBody + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ContainerApiResponse, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ContainerApiResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApiResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.invoke_container_api.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + 
query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ContainerApiResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} # type: ignore + + def create_compose_deployment( + self, + create_compose_deployment_description, # type: "_models.CreateComposeDeploymentDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Creates a Service Fabric compose deployment. + + Compose is a file format that describes multi-container applications. This API allows deploying + container based applications defined in compose format in a Service Fabric cluster. 
Once the + deployment is created, its status can be tracked via the ``GetComposeDeploymentStatus`` API. + + :param create_compose_deployment_description: Describes the compose deployment that needs to be + created. + :type create_compose_deployment_description: ~azure.servicefabric.models.CreateComposeDeploymentDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_compose_deployment.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription') + body_content_kwargs['content'] = body_content + 
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} # type: ignore + + def get_compose_deployment_status( + self, + deployment_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ComposeDeploymentStatusInfo" + """Gets information about a Service Fabric compose deployment. + + Returns the status of the compose deployment that was created or in the process of being + created in the Service Fabric cluster and whose name matches the one specified as the + parameter. The response includes the name, status, and other details about the deployment. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ComposeDeploymentStatusInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentStatusInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_compose_deployment_status.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ComposeDeploymentStatusInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
get_compose_deployment_status.metadata = {'url': '/ComposeDeployments/{deploymentName}'} # type: ignore + + def get_compose_deployment_status_list( + self, + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedComposeDeploymentStatusInfoList" + """Gets the list of compose deployments created in the Service Fabric cluster. + + Gets the status about the compose deployments that were created or in the process of being + created in the Service Fabric cluster. The response includes the name, status, and other + details about the compose deployments. If the list of deployments do not fit in a page, one + page of results is returned as well as a continuation token, which can be used to get the next + page. + + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedComposeDeploymentStatusInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedComposeDeploymentStatusInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_compose_deployment_status_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} # type: ignore + + def get_compose_deployment_upgrade_progress( + self, + deployment_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ComposeDeploymentUpgradeProgressInfo" + """Gets details for the latest upgrade performed on this Service Fabric compose deployment. + + Returns the information about the state of the compose deployment upgrade along with details to + aid debugging application health issues. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ComposeDeploymentUpgradeProgressInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentUpgradeProgressInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_compose_deployment_upgrade_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', pipeline_response) + + if cls: + return cls(pipeline_response, 
deserialized, {}) + + return deserialized + get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} # type: ignore + + def remove_compose_deployment( + self, + deployment_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes an existing Service Fabric compose deployment from cluster. + + Deletes an existing Service Fabric compose deployment. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.remove_compose_deployment.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} # type: ignore + + def start_compose_deployment_upgrade( + self, + deployment_name, # type: str + compose_deployment_upgrade_description, # type: "_models.ComposeDeploymentUpgradeDescription" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Starts upgrading a compose deployment in the Service Fabric cluster. + + Validates the supplied upgrade parameters and starts upgrading the deployment if the parameters + are valid. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param compose_deployment_upgrade_description: Parameters for upgrading compose deployment. + :type compose_deployment_upgrade_description: ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_compose_deployment_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} # type: ignore + + def start_rollback_compose_deployment_upgrade( + self, + deployment_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Starts rolling back a compose deployment upgrade in the Service Fabric cluster. + + Rollback a service fabric compose deployment upgrade. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_rollback_compose_deployment_upgrade.metadata['url'] # type: ignore + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_rollback_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/RollbackUpgrade'} # type: ignore + + def get_chaos( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.Chaos" + """Get the status of Chaos. + + Get the status of Chaos indicating whether or not Chaos is running, the Chaos parameters used + for running Chaos and the status of the Chaos Schedule. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Chaos, or the result of cls(response) + :rtype: ~azure.servicefabric.models.Chaos + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.Chaos"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_chaos.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Chaos', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_chaos.metadata = {'url': '/Tools/Chaos'} # type: ignore + + def start_chaos( + self, + chaos_parameters, # type: "_models.ChaosParameters" + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Starts Chaos in the cluster. 
+ + If Chaos is not already running in the cluster, it starts Chaos with the passed in Chaos + parameters. + If Chaos is already running when this call is made, the call fails with the error code + FABRIC_E_CHAOS_ALREADY_RUNNING. + Refer to the article `Induce controlled Chaos in Service Fabric clusters + `_ for more + details. + + :param chaos_parameters: Describes all the parameters to configure a Chaos run. + :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.start_chaos.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = 
self._serialize.body(chaos_parameters, 'ChaosParameters') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'} # type: ignore + + def stop_chaos( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Stops Chaos if it is running in the cluster and put the Chaos Schedule in a stopped state. + + Stops Chaos from executing new faults. In-flight faults will continue to execute until they are + complete. The current Chaos Schedule is put into a stopped state. + Once a schedule is stopped, it will stay in the stopped state and not be used to Chaos Schedule + new runs of Chaos. A new Chaos Schedule must be set in order to resume scheduling. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.stop_chaos.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'} # type: ignore + + def get_chaos_events( + self, + continuation_token_parameter=None, # type: Optional[str] + start_time_utc=None, # type: Optional[str] + end_time_utc=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> "_models.ChaosEventsSegment" + """Gets the next segment of the Chaos events based on the continuation token or the time range. + + To get the next segment of the Chaos events, you can specify the ContinuationToken. To get the + start of a new segment of Chaos events, you can specify the time range + through StartTimeUtc and EndTimeUtc. You cannot specify both the ContinuationToken and the time + range in the same call. + When there are more than 100 Chaos events, the Chaos events are returned in multiple segments + where a segment contains no more than 100 Chaos events and to get the next segment you make a + call to this API with the continuation token. + + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param start_time_utc: The Windows file time representing the start time of the time range for + which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method + `_.aspx) for + details. + :type start_time_utc: str + :param end_time_utc: The Windows file time representing the end time of the time range for + which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method + `_.aspx) for + details. + :type end_time_utc: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. 
The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ChaosEventsSegment, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ChaosEventsSegment + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosEventsSegment"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_chaos_events.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if start_time_utc is not None: + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + if end_time_utc is not None: + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + 
if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ChaosEventsSegment', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'} # type: ignore + + def get_chaos_schedule( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ChaosScheduleDescription" + """Get the Chaos Schedule defining when and how to run Chaos. + + Gets the version of the Chaos Schedule in use and the Chaos Schedule that defines when and how + to run Chaos. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ChaosScheduleDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ChaosScheduleDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosScheduleDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_chaos_schedule.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ChaosScheduleDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore + + def post_chaos_schedule( + self, + timeout=60, # type: Optional[int] + version=None, # type: Optional[int] + schedule=None, # type: 
Optional["_models.ChaosSchedule"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set the schedule used by Chaos. + + Chaos will automatically schedule runs based on the Chaos Schedule. + The Chaos Schedule will be updated if the provided version matches the version on the server. + When updating the Chaos Schedule, the version on the server is incremented by 1. + The version on the server will wrap back to 0 after reaching a large number. + If Chaos is running when this call is made, the call will fail. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param version: The version number of the Schedule. + :type version: int + :param schedule: Defines the schedule used by Chaos. + :type schedule: ~azure.servicefabric.models.ChaosSchedule + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _chaos_schedule = _models.ChaosScheduleDescription(version=version, schedule=schedule) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.post_chaos_schedule.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, 
minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_chaos_schedule, 'ChaosScheduleDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore + + def upload_file( + self, + content_path, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Uploads contents of the file to the image store. + + Uploads contents of the file to the image store. Use this API if the file is small enough to + upload again if the connection fails. The file's data needs to be added to the request body. + The contents will be uploaded to the specified path. Image store service uses a mark file to + indicate the availability of the folder. The mark file is an empty file named "_.dir". The mark + file is generated by the image store service when all files in a folder are uploaded. 
When + using File-by-File approach to upload application package in REST, the image store service + isn't aware of the file hierarchy of the application package; you need to create a mark file + per folder and upload it last, to let the image store service know that the folder is complete. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.upload_file.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + upload_file.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore + + def get_image_store_content( + self, + content_path, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ImageStoreContent" + """Gets the image store content information. + + Returns the information about the image store content at the specified contentPath. The + contentPath is relative to the root of the image store. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ImageStoreContent, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ImageStoreContent + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_content.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ImageStoreContent', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: 
ignore + + def delete_image_store_content( + self, + content_path, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes existing image store content. + + Deletes existing image store content being found within the given image store relative path. + This command can be used to delete uploaded application packages once they are provisioned. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_image_store_content.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore + + def get_image_store_root_content( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ImageStoreContent" + """Gets the content information at the root of the image store. + + Returns the information about the image store content at the root of the image store. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ImageStoreContent, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ImageStoreContent + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_root_content.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ImageStoreContent', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_root_content.metadata = {'url': '/ImageStore'} # type: ignore + + def copy_image_store_content( + self, + image_store_copy_description, # type: "_models.ImageStoreCopyDescription" + timeout=60, # type: Optional[int] + **kwargs # 
type: Any + ): + # type: (...) -> None + """Copies image store content internally. + + Copies the image store content from the source image store relative path to the destination + image store relative path. + + :param image_store_copy_description: Describes the copy description for the image store. + :type image_store_copy_description: ~azure.servicefabric.models.ImageStoreCopyDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.copy_image_store_content.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription') + 
body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'} # type: ignore + + def delete_image_store_upload_session( + self, + session_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Cancels an image store upload session. + + The DELETE request will cause the existing upload session to expire and remove any previously + uploaded file chunks. + + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_image_store_upload_session.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'} # type: ignore + + def commit_image_store_upload_session( + self, + session_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Commit an image store upload session. 
+ + When all file chunks have been uploaded, the upload session needs to be committed explicitly to + complete the upload. Image store preserves the upload session until the expiration time, which + is 30 minutes after the last chunk received. + + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.commit_image_store_upload_session.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} # type: ignore + + def get_image_store_upload_session_by_id( + self, + session_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.UploadSession" + """Get the image store upload session by ID. + + Gets the image store upload session identified by the given ID. User can query the upload + session at any time during uploading. + + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UploadSession, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UploadSession + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_upload_session_by_id.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UploadSession', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} # type: ignore + + def get_image_store_upload_session_by_path( + self, + 
content_path, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.UploadSession" + """Get the image store upload session by relative path. + + Gets the image store upload session associated with the given image store relative path. User + can query the upload session at any time during uploading. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UploadSession, or the result of cls(response) + :rtype: ~azure.servicefabric.models.UploadSession + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_upload_session_by_path.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", 
accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('UploadSession', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} # type: ignore + + def upload_file_chunk( + self, + content_path, # type: str + session_id, # type: str + content_range, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Uploads a file chunk to the image store relative path. + + Uploads a file chunk to the image store with the specified upload session ID and image store + relative path. This API allows user to resume the file upload operation. user doesn't have to + restart the file upload from scratch whenever there is a network interruption. Use this option + if the file size is large. + + To perform a resumable file upload, user need to break the file into multiple chunks and upload + these chunks to the image store one-by-one. Chunks don't have to be uploaded in order. If the + file represented by the image store relative path already exists, it will be overwritten when + the upload session commits. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param session_id: A GUID generated by the user for a file uploading. It identifies an image + store upload session which keeps track of all file chunks until it is committed. 
+ :type session_id: str + :param content_range: When uploading file chunks to the image store, the Content-Range header + field needs to be configured and sent with a request. The format should look like "bytes + {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For example, Content-Range:bytes + 300-5000/20000 indicates that user is sending bytes 300 through 5,000 and the total file length + is 20,000 bytes. + :type content_range: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.upload_file_chunk.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Range'] = self._serialize.header("content_range", 
content_range, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} # type: ignore + + def get_image_store_root_folder_size( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.FolderSizeInfo" + """Get the folder size at the root of the image store. + + Returns the total size of files at the root and children folders in image store. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FolderSizeInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.FolderSizeInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_root_folder_size.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('FolderSizeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_root_folder_size.metadata = {'url': '/ImageStore/$/FolderSize'} # type: ignore + + def get_image_store_folder_size( + self, + content_path, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> "_models.FolderSizeInfo" + """Get the size of a folder in image store. + + Gets the total size of file under a image store folder, specified by contentPath. The + contentPath is relative to the root of the image store. + + :param content_path: Relative path to file or folder in the image store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FolderSizeInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.FolderSizeInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_folder_size.metadata['url'] # type: ignore + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('FolderSizeInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_folder_size.metadata = {'url': '/ImageStore/{contentPath}/$/FolderSize'} # type: ignore + + def get_image_store_info( + self, + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ImageStoreInfo" + """Gets the overall ImageStore information. + + Returns information about the primary ImageStore replica, such as disk capacity and available + disk space at the node it is on, and several categories of the ImageStore's file system usage. + + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ImageStoreInfo, or the result of cls(response) + :rtype: ~azure.servicefabric.models.ImageStoreInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreInfo"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_image_store_info.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ImageStoreInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_image_store_info.metadata = {'url': '/ImageStore/$/Info'} # type: ignore + + def invoke_infrastructure_command( + self, + command, # type: str + service_id=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: 
(...) -> IO + """Invokes an administrative command on the given Infrastructure Service instance. + + For clusters that have one or more instances of the Infrastructure Service configured, + this API provides a way to send infrastructure-specific commands to a particular + instance of the Infrastructure Service. + + Available commands and their corresponding response formats vary depending upon + the infrastructure on which the cluster is running. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param command: The text of the command to be invoked. The content of the command is + infrastructure-specific. + :type command: str + :param service_id: The identity of the infrastructure service. This is the full name of the + infrastructure service without the 'fabric:' URI scheme. This parameter is required only for the + cluster that has more than one instance of infrastructure service running. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.invoke_infrastructure_command.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Command'] = self._serialize.query("command", command, 'str') + if service_id is not None: + query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} # type: ignore + + def invoke_infrastructure_query( + 
self, + command, # type: str + service_id=None, # type: Optional[str] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> IO + """Invokes a read-only query on the given infrastructure service instance. + + For clusters that have one or more instances of the Infrastructure Service configured, + this API provides a way to send infrastructure-specific queries to a particular + instance of the Infrastructure Service. + + Available commands and their corresponding response formats vary depending upon + the infrastructure on which the cluster is running. + + This API supports the Service Fabric platform; it is not meant to be used directly from your + code. + + :param command: The text of the command to be invoked. The content of the command is + infrastructure-specific. + :type command: str + :param service_id: The identity of the infrastructure service. This is the full name of the + infrastructure service without the 'fabric:' URI scheme. This parameter is required only for the + cluster that has more than one instance of infrastructure service running. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.invoke_infrastructure_query.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Command'] = self._serialize.query("command", command, 'str') + if service_id is not None: + query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} # type: ignore + + def start_data_loss( + self, + service_id, 
# type: str + partition_id, # type: str + operation_id, # type: str + data_loss_mode, # type: Union[str, "_models.DataLossMode"] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """This API will induce data loss for the specified partition. It will trigger a call to the OnDataLossAsync API of the partition. + + This API will induce data loss for the specified partition. It will trigger a call to the + OnDataLoss API of the partition. + Actual data loss will depend on the specified DataLossMode. + + + * PartialDataLoss - Only a quorum of replicas are removed and OnDataLoss is triggered for the + partition but actual data loss depends on the presence of in-flight replication. + * FullDataLoss - All replicas are removed hence all data is lost and OnDataLoss is triggered. + + This API should only be called with a stateful service as the target. + + Calling this API with a system service as the target is not advised. + + Note: Once this API has been called, it cannot be reversed. Calling CancelOperation will only + stop execution and clean up internal system state. + It will not restore data if the command has progressed far enough to cause data loss. + + Call the GetDataLossProgress API with the same OperationId to return information on the + operation started with this API. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. 
+ :type operation_id: str + :param data_loss_mode: This enum is passed to the StartDataLoss API to indicate what type of + data loss to induce. + :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_data_loss.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} # type: ignore + + def get_data_loss_progress( + self, + service_id, # type: str + partition_id, # type: str + operation_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PartitionDataLossProgress" + """Gets the progress of a partition data loss operation started using the StartDataLoss API. + + Gets the progress of a data loss operation started with StartDataLoss, using the OperationId. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionDataLossProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionDataLossProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionDataLossProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_data_loss_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('PartitionDataLossProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} # type: ignore + + def start_quorum_loss( + self, + service_id, # type: str + partition_id, # type: str + operation_id, # type: str + quorum_loss_mode, # type: Union[str, "_models.QuorumLossMode"] + quorum_loss_duration, # type: int + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Induces quorum loss for a given stateful service partition. + + This API is useful for a temporary quorum loss situation on your service. + + Call the GetQuorumLossProgress API with the same OperationId to return information on the + operation started with this API. + + This can only be called on stateful persisted (HasPersistedState==true) services. Do not use + this API on stateless services or stateful in-memory only services. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param quorum_loss_mode: This enum is passed to the StartQuorumLoss API to indicate what type + of quorum loss to induce. + :type quorum_loss_mode: str or ~azure.servicefabric.models.QuorumLossMode + :param quorum_loss_duration: The amount of time for which the partition will be kept in quorum + loss. 
This must be specified in seconds. + :type quorum_loss_duration: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_quorum_loss.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str') + query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, 
query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} # type: ignore + + def get_quorum_loss_progress( + self, + service_id, # type: str + partition_id, # type: str + operation_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PartitionQuorumLossProgress" + """Gets the progress of a quorum loss operation on a partition started using the StartQuorumLoss API. + + Gets the progress of a quorum loss operation started with StartQuorumLoss, using the provided + OperationId. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionQuorumLossProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionQuorumLossProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_quorum_loss_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized 
= self._deserialize('PartitionQuorumLossProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} # type: ignore + + def start_partition_restart( + self, + service_id, # type: str + partition_id, # type: str + operation_id, # type: str + restart_partition_mode, # type: Union[str, "_models.RestartPartitionMode"] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """This API will restart some or all replicas or instances of the specified partition. + + This API is useful for testing failover. + + If used to target a stateless service partition, RestartPartitionMode must be + AllReplicasOrInstances. + + Call the GetPartitionRestartProgress API using the same OperationId to get the progress. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param restart_partition_mode: Describe which partitions to restart. + :type restart_partition_mode: str or ~azure.servicefabric.models.RestartPartitionMode + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_partition_restart.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) 
+ + if cls: + return cls(pipeline_response, None, {}) + + start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} # type: ignore + + def get_partition_restart_progress( + self, + service_id, # type: str + partition_id, # type: str + operation_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PartitionRestartProgress" + """Gets the progress of a PartitionRestart operation started using StartPartitionRestart. + + Gets the progress of a PartitionRestart started with StartPartitionRestart using the provided + OperationId. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PartitionRestartProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PartitionRestartProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionRestartProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_restart_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('PartitionRestartProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} # type: ignore + + def start_node_transition( + self, + node_name, # type: str + operation_id, # type: str + node_transition_type, # type: Union[str, "_models.NodeTransitionType"] + node_instance_id, # type: str + stop_duration_in_seconds, # type: int + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Starts or stops a cluster node. + + Starts or stops a cluster node. A cluster node is a process, not the OS instance itself. To + start a node, pass in "Start" for the NodeTransitionType parameter. + To stop a node, pass in "Stop" for the NodeTransitionType parameter. This API starts the + operation - when the API returns the node may not have finished transitioning yet. + Call GetNodeTransitionProgress with the same OperationId to get the progress of the operation. + + :param node_name: The name of the node. + :type node_name: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param node_transition_type: Indicates the type of transition to perform. + NodeTransitionType.Start will start a stopped node. NodeTransitionType.Stop will stop a node + that is up. + :type node_transition_type: str or ~azure.servicefabric.models.NodeTransitionType + :param node_instance_id: The node instance ID of the target node. This can be determined + through GetNodeInfo API. + :type node_instance_id: str + :param stop_duration_in_seconds: The duration, in seconds, to keep the node stopped. The + minimum value is 600, the maximum is 14400. After this time expires, the node will + automatically come back up. 
+ :type stop_duration_in_seconds: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.start_node_transition.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str') + query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str') + query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, 
query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} # type: ignore + + def get_node_transition_progress( + self, + node_name, # type: str + operation_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.NodeTransitionProgress" + """Gets the progress of an operation started using StartNodeTransition. + + Gets the progress of an operation started with StartNodeTransition using the provided + OperationId. + + :param node_name: The name of the node. + :type node_name: str + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: NodeTransitionProgress, or the result of cls(response) + :rtype: ~azure.servicefabric.models.NodeTransitionProgress + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeTransitionProgress"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_transition_progress.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('NodeTransitionProgress', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
{}) + + return deserialized + get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} # type: ignore + + def get_fault_operation_list( + self, + type_filter=65535, # type: int + state_filter=65535, # type: int + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.OperationStatus"] + """Gets a list of user-induced fault operations filtered by provided input. + + Gets the list of user-induced fault operations filtered by provided input. + + :param type_filter: Used to filter on OperationType for user-induced operations. + + + * 65535 - select all + * 1 - select PartitionDataLoss. + * 2 - select PartitionQuorumLoss. + * 4 - select PartitionRestart. + * 8 - select NodeTransition. + :type type_filter: int + :param state_filter: Used to filter on OperationState's for user-induced operations. + + + * 65535 - select All + * 1 - select Running + * 2 - select RollingBack + * 8 - select Completed + * 16 - select Faulted + * 32 - select Cancelled + * 64 - select ForceCancelled. + :type state_filter: int + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of OperationStatus, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.OperationStatus] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.OperationStatus"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_fault_operation_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int') + query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[OperationStatus]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_fault_operation_list.metadata = {'url': 
'/Faults/'} # type: ignore + + def cancel_operation( + self, + operation_id, # type: str + force=False, # type: bool + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Cancels a user-induced fault operation. + + The following APIs start fault operations that may be cancelled by using CancelOperation: + StartDataLoss, StartQuorumLoss, StartPartitionRestart, StartNodeTransition. + + If force is false, then the specified user-induced operation will be gracefully stopped and + cleaned up. If force is true, the command will be aborted, and some internal state + may be left behind. Specifying force as true should be used with care. Calling this API with + force set to true is not allowed until this API has already + been called on the same test command with force set to false first, or unless the test command + already has an OperationState of OperationState.RollingBack. + Clarification: OperationState.RollingBack means that the system will be/is cleaning up internal + system state caused by executing the command. It will not restore data if the + test command was to cause data loss. For example, if you call StartDataLoss then call this + API, the system will only clean up internal state from running the command. + It will not restore the target partition's data, if the command progressed far enough to cause + data loss. + + Important note: if this API is invoked with force==true, internal state may be left behind. + + :param operation_id: A GUID that identifies a call of this API. This is passed into the + corresponding GetProgress API. + :type operation_id: str + :param force: Indicates whether to gracefully roll back and clean up internal system state + modified by executing the user-induced operation. + :type force: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.cancel_operation.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['Force'] = self._serialize.query("force", force, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + cancel_operation.metadata = {'url': '/Faults/$/Cancel'} # type: ignore + + def create_backup_policy( + self, + backup_policy_description, # type: "_models.BackupPolicyDescription" + timeout=60, # type: Optional[int] + 
validate_connection=False, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> None + """Creates a backup policy. + + Creates a backup policy which can be associated later with a Service Fabric application, + service or a partition for periodic backup. + + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param validate_connection: Specifies whether to validate the storage connection and + credentials before creating or updating the backup policies. + :type validate_connection: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.create_backup_policy.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if validate_connection is not None: + query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') + + # Construct headers + 
header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} # type: ignore + + def delete_backup_policy( + self, + backup_policy_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes the backup policy. + + Deletes an existing backup policy. A backup policy must be created before it can be deleted. A + currently active backup policy, associated with any Service Fabric application, service or + partition, cannot be deleted without first deleting the mapping. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.delete_backup_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} # type: ignore + + def get_backup_policy_list( + self, + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: 
Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedBackupPolicyDescriptionList" + """Gets all the backup policies configured. + + Get a list of all the backup policies configured. + + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupPolicyDescriptionList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupPolicyDescriptionList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_backup_policy_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
self._deserialize('PagedBackupPolicyDescriptionList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} # type: ignore + + def get_backup_policy_by_name( + self, + backup_policy_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.BackupPolicyDescription" + """Gets a particular backup policy by name. + + Gets a particular backup policy identified by {backupPolicyName}. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BackupPolicyDescription, or the result of cls(response) + :rtype: ~azure.servicefabric.models.BackupPolicyDescription + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicyDescription"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_backup_policy_by_name.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('BackupPolicyDescription', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} # type: ignore + + def get_all_entities_backed_up_by_policy( + self, + backup_policy_name, # type: str + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedBackupEntityList" + """Gets the list of backup entities that are associated with this policy. + + Returns a list of Service Fabric application, service or partition which are associated with + this backup policy. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. 
If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupEntityList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupEntityList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupEntityList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_all_entities_backed_up_by_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if 
continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupEntityList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} # type: ignore + + def update_backup_policy( + self, + backup_policy_name, # type: str + backup_policy_description, # type: "_models.BackupPolicyDescription" + timeout=60, # type: Optional[int] + validate_connection=False, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> None + """Updates the backup policy. + + Updates the backup policy identified by {backupPolicyName}. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param backup_policy_description: Describes the backup policy. 
+ :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param validate_connection: Specifies whether to validate the storage connection and + credentials before creating or updating the backup policies. + :type validate_connection: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update_backup_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if validate_connection is not None: + query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} # type: ignore + + def enable_application_backup( + self, + application_id, # type: str + backup_policy_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Enables periodic backup of stateful partitions under this Service Fabric application. + + Enables periodic backup of stateful partitions which are part of this Service Fabric + application. Each partition is backed up individually as per the specified backup policy + description. + Note only C# based Reliable Actor and Reliable Stateful services are currently supported for + periodic backup. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
+ :type application_id: str + :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.enable_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = 
self._serialize.body(_enable_backup_description, 'EnableBackupDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} # type: ignore + + def disable_application_backup( + self, + application_id, # type: str + clean_backup, # type: bool + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Disables periodic backup of Service Fabric application. + + Disables periodic backup of Service Fabric application which was previously enabled. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the + backups which were created for the backup entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. 
The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _disable_backup_description is not None: + body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + 
response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} # type: ignore + + def get_application_backup_configuration_info( + self, + application_id, # type: str + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedBackupConfigurationInfoList" + """Gets the Service Fabric application backup configuration information. + + Gets the Service Fabric backup configuration information for the application and the services + and partitions under this application. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupConfigurationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_backup_configuration_info.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} # type: ignore + + def get_application_backup_list( + self, + application_id, # type: str + timeout=60, # type: Optional[int] + latest=False, # type: Optional[bool] + start_date_time_filter=None, # type: Optional[datetime.datetime] + end_date_time_filter=None, # type: Optional[datetime.datetime] + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedBackupInfoList" + """Gets the list of backups available for every partition in this application. + + Returns a list of backups available for every partition in this Service Fabric application. 
The + server enumerates all the backups available at the backup location configured in the backup + policy. It also allows filtering of the result based on start and end datetime or just fetching + the latest available backup for every partition. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup available for a partition + for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, all backups from the beginning are enumerated. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specify the end date time till which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, enumeration is done till the most recent backup. + :type end_date_time_filter: ~datetime.datetime + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. 
When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_backup_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = 
self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} # type: ignore + + def suspend_application_backup( + self, + application_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Suspends periodic backup for the specified Service Fabric application. + + The application which is configured to take periodic backups, is suspended for taking further + backups till it is resumed again. 
This operation applies to the entire application's hierarchy. + It means all the services and partitions under this application are now suspended for backup. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.suspend_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} # type: ignore + + def resume_application_backup( + self, + application_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Resumes periodic backup of a Service Fabric application which was previously suspended. + + The previously suspended Service Fabric application resumes taking periodic backup as per the + backup policy currently configured for the same. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resume_application_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_application_backup.metadata = {'url': '/Applications/{applicationId}/$/ResumeBackup'} # type: ignore + + def enable_service_backup( + self, + service_id, # type: str + backup_policy_name, # type: str + timeout=60, # type: 
Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Enables periodic backup of stateful partitions under this Service Fabric service. + + Enables periodic backup of stateful partitions which are part of this Service Fabric service. + Each partition is backed up individually as per the specified backup policy description. In + case the application, which the service is part of, is already enabled for backup then this + operation would override the policy being used to take the periodic backup for this service and + its partitions (unless explicitly overridden at the partition level). + Note only C# based Reliable Actor and Reliable Stateful services are currently supported for + periodic backup. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.enable_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} # type: ignore + + def disable_service_backup( + self, + service_id, # type: str + clean_backup, # type: bool + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Disables periodic backup of Service Fabric service which was previously enabled. + + Disables periodic backup of Service Fabric service which was previously enabled. Backup must be + explicitly enabled before it can be disabled. + In case the backup is enabled for the Service Fabric application, which this service is part + of, this service would continue to be periodically backed up as per the policy mapped at the + application level. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the + backups which were created for the backup entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _disable_backup_description is not None: + body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code 
not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} # type: ignore + + def get_service_backup_configuration_info( + self, + service_id, # type: str + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedBackupConfigurationInfoList" + """Gets the Service Fabric service backup configuration information. + + Gets the Service Fabric backup configuration information for the service and the partitions + under this service. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. + :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. 
+ This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupConfigurationInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_backup_configuration_info.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = 
self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_backup_configuration_info.metadata = {'url': '/Services/{serviceId}/$/GetBackupConfigurationInfo'} # type: ignore + + def get_service_backup_list( + self, + service_id, # type: str + timeout=60, # type: Optional[int] + latest=False, # type: Optional[bool] + start_date_time_filter=None, # type: Optional[datetime.datetime] + end_date_time_filter=None, # type: Optional[datetime.datetime] + continuation_token_parameter=None, # type: Optional[str] + max_results=0, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.PagedBackupInfoList" + """Gets the list of backups available for every partition in this service. + + Returns a list of backups available for every partition in this Service Fabric service. The + server enumerates all the backups available in the backup store configured in the backup + policy. It also allows filtering of the result based on start and end datetime or just fetching + the latest available backup for every partition. 
+ + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup available for a partition + for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, all backups from the beginning are enumerated. + :type start_date_time_filter: ~datetime.datetime + :param end_date_time_filter: Specify the end date time till which to enumerate backups, in + datetime format. The date time must be specified in ISO8601 format. This is an optional + parameter. If not specified, enumeration is done till the most recent backup. + :type end_date_time_filter: ~datetime.datetime + :param continuation_token_parameter: The continuation token parameter is used to obtain next + set of results. A continuation token with a non-empty value is included in the response of the + API when the results from the system do not fit in a single response. When this value is passed + to the next API call, the API returns next set of results. If there are no further results, + then the continuation token does not contain a value. The value of this parameter should not be + URL encoded. 
+ :type continuation_token_parameter: str + :param max_results: The maximum number of results to be returned as part of the paged queries. + This parameter defines the upper bound on the number of results returned. The results returned + can be less than the specified maximum results if they do not fit in the message as per the max + message size restrictions defined in the configuration. If this parameter is zero or not + specified, the paged query includes as many results as possible that fit in the return message. + :type max_results: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PagedBackupInfoList, or the result of cls(response) + :rtype: ~azure.servicefabric.models.PagedBackupInfoList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_backup_list.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + 
query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token_parameter is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'} # type: ignore + + def suspend_service_backup( + self, + service_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Suspends periodic backup for the specified Service Fabric service. + + The service which is configured to take periodic backups, is suspended for taking further + backups till it is resumed again. This operation applies to the entire service's hierarchy. It + means all the partitions under this service are now suspended for backup. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.suspend_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'} # type: ignore + + def resume_service_backup( + self, + service_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Resumes periodic backup of a Service Fabric service which was previously suspended. + + The previously suspended Service Fabric service resumes taking periodic backup as per the + backup policy currently configured for the same. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resume_service_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'} # type: ignore + + def enable_partition_backup( + self, + partition_id, # type: str + backup_policy_name, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: 
Any + ): + # type: (...) -> None + """Enables periodic backup of the stateful persisted partition. + + Enables periodic backup of stateful persisted partition. Each partition is backed up as per the + specified backup policy description. In case the application or service, which is partition is + part of, is already enabled for backup then this operation would override the policy being used + to take the periodic backup of this partition. + Note only C# based Reliable Actor and Reliable Stateful services are currently supported for + periodic backup. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.enable_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'} # type: ignore + + def disable_partition_backup( + self, + partition_id, # type: str + clean_backup, # type: bool + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Disables periodic backup of Service Fabric partition which was previously enabled. + + Disables periodic backup of partition which was previously enabled. Backup must be explicitly + enabled before it can be disabled. + In case the backup is enabled for the Service Fabric application or service, which this + partition is part of, this partition would continue to be periodically backed up as per the + policy mapped at the higher level entity. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the + backups which were created for the backup entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.disable_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if _disable_backup_description is not None: + body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
    def get_partition_backup_configuration_info(
        self,
        partition_id,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PartitionBackupConfigurationInfo"
        """Gets the partition backup configuration information.

        Gets the Service Fabric Backup configuration information for the specified partition.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PartitionBackupConfigurationInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PartitionBackupConfigurationInfo"]
        # Map auth / not-found / conflict statuses onto azure-core exception types;
        # callers can extend or override via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (skip_quote=True leaves the partition id un-encoded in the path).
        url = self.get_partition_backup_configuration_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters; the server-side timeout must lie in [1, 4294967295].
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 OK is a success; other statuses raise with the service's
        # FabricError payload attached when it can be parsed.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PartitionBackupConfigurationInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'}  # type: ignore
    def get_partition_backup_list(
        self,
        partition_id,  # type: str
        timeout=60,  # type: Optional[int]
        latest=False,  # type: Optional[bool]
        start_date_time_filter=None,  # type: Optional[datetime.datetime]
        end_date_time_filter=None,  # type: Optional[datetime.datetime]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedBackupInfoList"
        """Gets the list of backups available for the specified partition.

        Returns a list of backups available for the specified partition. The server enumerates all
        the backups available in the backup store configured in the backup policy. It also allows
        filtering of the result based on start and end datetime or just fetching the latest
        available backup for the partition.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param latest: Specifies whether to get only the most recent backup available for a
         partition for the specified time range.
        :type latest: bool
        :param start_date_time_filter: Specify the start date time from which to enumerate backups,
         in datetime format (ISO8601). Optional; if not specified, all backups from the beginning
         are enumerated.
        :type start_date_time_filter: ~datetime.datetime
        :param end_date_time_filter: Specify the end date time till which to enumerate backups, in
         datetime format (ISO8601). Optional; if not specified, enumeration is done till the most
         recent backup.
        :type end_date_time_filter: ~datetime.datetime
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedBackupInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedBackupInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedBackupInfoList"]
        # Map auth / not-found / conflict statuses onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (skip_quote=True leaves the partition id un-encoded in the path).
        url = self.get_partition_backup_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters. Note: 'latest' defaults to False (not None), so the
        # 'Latest' query parameter is sent on every request unless explicitly None.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        if latest is not None:
            query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool')
        if start_date_time_filter is not None:
            query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601')
        if end_date_time_filter is not None:
            query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 OK is a success; other statuses raise with the FabricError payload.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedBackupInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'}  # type: ignore
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'} # type: ignore + + def suspend_partition_backup( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Suspends periodic backup for the specified partition. + + The partition which is configured to take periodic backups, is suspended for taking further + backups till it is resumed again. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.suspend_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'} # type: ignore + + def resume_partition_backup( + self, + partition_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: 
(...) -> None + """Resumes periodic backup of partition which was previously suspended. + + The previously suspended partition resumes taking periodic backup as per the backup policy + currently configured for the same. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.resume_partition_backup.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'} # type: ignore + + def backup_partition( + self, + partition_id, # type: str + backup_timeout=10, # type: Optional[int] + timeout=60, # type: Optional[int] + backup_storage=None, # type: Optional["_models.BackupStorageDescription"] + **kwargs # type: Any + ): + # type: (...) -> None + """Triggers backup of the partition's state. + + Creates a backup of the stateful persisted partition's state. In case the partition is already + being periodically backed up, then by default the new backup is created at the same backup + storage. One can also override the same by specifying the backup storage details as part of the + request body. Once the backup is initiated, its progress can be tracked using the + GetBackupProgress operation. + In case, the operation times out, specify a greater backup timeout value in the query + parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_timeout: Specifies the maximum amount of time, in minutes, to wait for the backup + operation to complete. Post that, the operation completes with timeout error. However, in + certain corner cases it could be that though the operation returns back timeout, the backup + actually goes through. In case of timeout error, its recommended to invoke this operation again + with a greater timeout value. The default value for the same is 10 minutes. + :type backup_timeout: int + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param backup_storage: Specifies the details of the backup storage where to save the backup. + :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _backup_partition_description = _models.BackupPartitionDescription(backup_storage=backup_storage) + api_version = "8.0" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.backup_partition.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if backup_timeout is not None: + query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: 
    def get_partition_backup_progress(
        self,
        partition_id,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BackupProgressInfo"
        """Gets details for the latest backup triggered for this partition.

        Returns information about the state of the latest backup along with details or failure
        reason in case of completion.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BackupProgressInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.BackupProgressInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackupProgressInfo"]
        # Map auth / not-found / conflict statuses onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (skip_quote=True leaves the partition id un-encoded in the path).
        url = self.get_partition_backup_progress.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters; the server-side timeout must lie in [1, 4294967295].
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 OK is a success; other statuses raise with the FabricError payload.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('BackupProgressInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_backup_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupProgress'}  # type: ignore
    def restore_partition(
        self,
        partition_id,  # type: str
        restore_partition_description,  # type: "_models.RestorePartitionDescription"
        restore_timeout=10,  # type: Optional[int]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Triggers restore of the state of the partition using the specified restore partition
        description.

        Restores the state of a of the stateful persisted partition using the specified backup
        point. In case the partition is already being periodically backed up, then by default the
        backup point is looked for in the storage specified in backup policy. One can also override
        the same by specifying the backup storage details as part of the restore partition
        description in body. Once the restore is initiated, its progress can be tracked using the
        GetRestoreProgress operation.
        In case, the operation times out, specify a greater restore timeout value in the query
        parameter.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param restore_partition_description: Describes the parameters to restore the partition.
        :type restore_partition_description: ~azure.servicefabric.models.RestorePartitionDescription
        :param restore_timeout: Specifies the maximum amount of time to wait, in minutes, for the
         restore operation to complete. Post that, the operation returns back with timeout error.
         However, in certain corner cases it could be that the restore operation goes through even
         though it completes with timeout. In case of timeout error, its recommended to invoke this
         operation again with a greater timeout value. the default value for the same is 10 minutes.
        :type restore_timeout: int
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth / not-found / conflict statuses onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL (skip_quote=True leaves the partition id un-encoded in the path).
        url = self.restore_partition.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (RestoreTimeout precedes api-version in the wire order).
        query_parameters = {}  # type: Dict[str, Any]
        if restore_timeout is not None:
            query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The caller-supplied description is required, so it is serialized unconditionally.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status; other statuses raise with the
        # FabricError payload attached when it can be parsed.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'}  # type: ignore
    def get_partition_restore_progress(
        self,
        partition_id,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RestoreProgressInfo"
        """Gets details for the latest restore operation triggered for this partition.

        Returns information about the state of the latest restore operation along with details or
        failure reason in case of completion.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RestoreProgressInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.RestoreProgressInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RestoreProgressInfo"]
        # Map auth / not-found / conflict statuses onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (skip_quote=True leaves the partition id un-encoded in the path).
        url = self.get_partition_restore_progress.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters; the server-side timeout must lie in [1, 4294967295].
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 OK is a success; other statuses raise with the FabricError payload.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('RestoreProgressInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_restore_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetRestoreProgress'}  # type: ignore
    def get_backups_from_backup_location(
        self,
        get_backup_by_storage_query_description,  # type: "_models.GetBackupByStorageQueryDescription"
        timeout=60,  # type: Optional[int]
        continuation_token_parameter=None,  # type: Optional[str]
        max_results=0,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedBackupInfoList"
        """Gets the list of backups available for the specified backed up entity at the specified
        backup location.

        Gets the list of backups available for the specified backed up entity (Application, Service
        or Partition) at the specified backup location (FileShare or Azure Blob Storage).

        :param get_backup_by_storage_query_description: Describes the filters and backup storage
         details to be used for enumerating backups.
        :type get_backup_by_storage_query_description:
         ~azure.servicefabric.models.GetBackupByStorageQueryDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param continuation_token_parameter: The continuation token parameter is used to obtain
         next set of results. A continuation token with a non-empty value is included in the
         response of the API when the results from the system do not fit in a single response. When
         this value is passed to the next API call, the API returns next set of results. If there
         are no further results, then the continuation token does not contain a value. The value of
         this parameter should not be URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged
         queries. This parameter defines the upper bound on the number of results returned. The
         results returned can be less than the specified maximum results if they do not fit in the
         message as per the max message size restrictions defined in the configuration. If this
         parameter is zero or not specified, the paged query includes as many results as possible
         that fit in the return message.
        :type max_results: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedBackupInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedBackupInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedBackupInfoList"]
        # Map auth / not-found / conflict statuses onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL (no path parameters for this operation).
        url = self.get_backups_from_backup_location.metadata['url']  # type: ignore

        # Construct parameters. Note: max_results defaults to 0 (not None), so
        # 'MaxResults=0' is sent unless the caller passes None explicitly; the
        # continuation token is deliberately not URL-encoded (skip_quote=True).
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        if continuation_token_parameter is not None:
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The caller-supplied query description is required, so it is serialized
        # unconditionally as the POST body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 OK is a success; other statuses raise with the FabricError payload.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedBackupInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'}  # type: ignore
    def create_name(
        self,
        name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Creates a Service Fabric name.

        Creates the specified Service Fabric name.

        :param name: The Service Fabric name, including the 'fabric:' URI scheme.
        :type name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation
         to complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth / not-found / conflict statuses onto azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Wrap the flat 'name' argument into the request-body model.
        _name_description = _models.NameDescription(name=name)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL (no path parameters for this operation).
        url = self.create_name.metadata['url']  # type: ignore

        # Construct parameters; the server-side timeout must lie in [1, 4294967295].
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_name_description, 'NameDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 201 Created is the only success status for name creation; other statuses
        # raise with the FabricError payload attached when it can be parsed.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    create_name.metadata = {'url': '/Names/$/Create'}  # type: ignore
{'url': '/Names/$/Create'} # type: ignore + + def get_name_exists_info( + self, + name_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Returns whether the Service Fabric name exists. + + Returns whether the specified Service Fabric name exists. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_name_exists_info.metadata['url'] # type: ignore + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + get_name_exists_info.metadata = {'url': '/Names/{nameId}'} # type: ignore + + def delete_name( + self, + name_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes a Service Fabric name. + + Deletes the specified Service Fabric name. A name must be created before it can be deleted. + Deleting a name with child properties will fail. + + :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.delete_name.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete_name.metadata = {'url': '/Names/{nameId}'}  # type: ignore

    def get_sub_name_info_list(
        self,
        name_id,  # type: str
        recursive=False,  # type: Optional[bool]
        continuation_token_parameter=None,  # type: Optional[str]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedSubNameInfoList"
        """Enumerates all the Service Fabric names under a given name.

        Enumerates all the Service Fabric names under a given name. If the subnames do not fit in a
        page, one page of results is returned as well as a continuation token, which can be used to get
        the next page. Querying a name that doesn't exist will fail.

        :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
        :type name_id: str
        :param recursive: Allows specifying that the search performed should be recursive.
        :type recursive: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedSubNameInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedSubNameInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedSubNameInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_sub_name_info_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if recursive is not None:
            query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool')
        if continuation_token_parameter is not None:
            # skip_quote=True: the docstring requires the token to be passed through un-encoded.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedSubNameInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'}  # type: ignore

    def get_property_info_list(
        self,
        name_id,  # type: str
        include_values=False,  # type: Optional[bool]
        continuation_token_parameter=None,  # type: Optional[str]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedPropertyInfoList"
        """Gets information on all Service Fabric properties under a given name.

        A Service Fabric name can have one or more named properties that store custom information. This
        operation gets the information about these properties in a paged list. The information includes
        name, value, and metadata about each of the properties.

        :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
        :type name_id: str
        :param include_values: Allows specifying whether to include the values of the properties
         returned. True if values should be returned with the metadata; False to return only property
         metadata.
        :type include_values: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedPropertyInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedPropertyInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedPropertyInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_property_info_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if include_values is not None:
            query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool')
        if continuation_token_parameter is not None:
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedPropertyInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'}  # type: ignore

    def put_property(
        self,
        name_id,  # type: str
        property_description,  # type: "_models.PropertyDescription"
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Creates or updates a Service Fabric property.

        Creates or updates the specified Service Fabric property under a given name.

        :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
        :type name_id: str
        :param property_description: Describes the Service Fabric property to be created.
        :type property_description: ~azure.servicefabric.models.PropertyDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.put_property.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(property_description, 'PropertyDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    # NOTE(review): create/get/delete of a property all share the '/$/GetProperty' URL segment;
    # the HTTP verb (PUT/GET/DELETE) selects the operation.
    put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'}  # type: ignore

    def get_property_info(
        self,
        name_id,  # type: str
        property_name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PropertyInfo"
        """Gets the specified Service Fabric property.

        Gets the specified Service Fabric property under a given name. This will always return both
        value and metadata.

        :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
        :type name_id: str
        :param property_name: Specifies the name of the property to get.
        :type property_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PropertyInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PropertyInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PropertyInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_property_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # PropertyName is required — unlike the optional parameters above it is not guarded by a None check.
        query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PropertyInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'}  # type: ignore

    def delete_property(
        self,
        name_id,  # type: str
        property_name,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes the specified Service Fabric property.

        Deletes the specified Service Fabric property under a given name. A property must be created
        before it can be deleted.

        :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
        :type name_id: str
        :param property_name: Specifies the name of the property to delete.
        :type property_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.delete_property.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'}  # type: ignore

    def submit_property_batch(
        self,
        name_id,  # type: str
        timeout=60,  # type: Optional[int]
        operations=None,  # type: Optional[List["_models.PropertyBatchOperation"]]
        **kwargs  # type: Any
    ):
        # type: (...) -> Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]
        """Submits a property batch.

        Submits a batch of property operations. Either all or none of the operations will be committed.

        :param name_id: The Service Fabric name, without the 'fabric:' URI scheme.
        :type name_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param operations: A list of the property batch operations to be executed.
        :type operations: list[~azure.servicefabric.models.PropertyBatchOperation]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SuccessfulPropertyBatchInfo or FailedPropertyBatchInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.SuccessfulPropertyBatchInfo or ~azure.servicefabric.models.FailedPropertyBatchInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # The flat 'operations' parameter is wrapped into the PropertyBatchDescriptionList body model.
        _property_batch_description_list = _models.PropertyBatchDescriptionList(operations=operations)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.submit_property_batch.metadata['url']  # type: ignore
        path_format_arguments = {
            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_property_batch_description_list, 'PropertyBatchDescriptionList')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 409 here is a *successful* call whose batch failed: it is excluded from error mapping and
        # deserialized into FailedPropertyBatchInfo instead of being raised. Note the declared
        # error_map also maps 409 -> ResourceExistsError, but that mapping is unreachable because
        # 409 never enters the error path below.
        if response.status_code not in [200, 409]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if response.status_code == 200:
            deserialized = self._deserialize('SuccessfulPropertyBatchInfo', pipeline_response)

        if response.status_code == 409:
            deserialized = self._deserialize('FailedPropertyBatchInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'}  # type: ignore

    def get_cluster_event_list(
        self,
        start_time_utc,  # type: str
        end_time_utc,  # type: str
        timeout=60,  # type: Optional[int]
        events_types_filter=None,  # type: Optional[str]
        exclude_analysis_events=None,  # type: Optional[bool]
        skip_correlation_lookup=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.ClusterEvent"]
        """Gets all Cluster-related events.

        The response is list of ClusterEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string specifying the types of
         FabricEvents that should only be included in the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is
         passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information
         if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field
         in every FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ClusterEvent, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.ClusterEvent]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ClusterEvent"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_event_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        # StartTimeUtc/EndTimeUtc are required and therefore added unconditionally.
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('[ClusterEvent]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_event_list.metadata = {'url': '/EventsStore/Cluster/Events'}  # type: ignore

    def get_containers_event_list(
        self,
        start_time_utc,  # type: str
        end_time_utc,  # type: str
        timeout=60,  # type: Optional[int]
        events_types_filter=None,  # type: Optional[str]
        exclude_analysis_events=None,  # type: Optional[bool]
        skip_correlation_lookup=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.ContainerInstanceEvent"]
        """Gets all Containers-related events.

        The response is list of ContainerInstanceEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string specifying the types of
         FabricEvents that should only be included in the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is
         passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information
         if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field
         in every FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ContainerInstanceEvent, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ContainerInstanceEvent"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_containers_event_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('[ContainerInstanceEvent]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'}  # type: ignore

    def get_node_event_list(
        self,
        node_name,  # type: str
        start_time_utc,  # type: str
        end_time_utc,  # type: str
        timeout=60,  # type: Optional[int]
        events_types_filter=None,  # type: Optional[str]
        exclude_analysis_events=None,  # type: Optional[bool]
        skip_correlation_lookup=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.NodeEvent"]
        """Gets a Node-related events.

        The response is list of NodeEvent objects.

        :param node_name: The name of the node.
        :type node_name: str
        :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string specifying the types of
         FabricEvents that should only be included in the response.
+ :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of NodeEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.NodeEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_node_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = 
self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[NodeEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'} # type: ignore + + def get_nodes_event_list( + self, + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.NodeEvent"] + """Gets all Nodes-related Events. + + The response is list of NodeEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. 
This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of NodeEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.NodeEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_nodes_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + 
query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[NodeEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_nodes_event_list.metadata = {'url': '/EventsStore/Nodes/Events'} # type: ignore + + def get_application_event_list( + self, + application_id, # type: str + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ApplicationEvent"] + """Gets an Application-related events. + + The response is list of ApplicationEvent objects. + + :param application_id: The identity of the application. This is typically the full name of the + application without the 'fabric:' URI scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the application name is "fabric:/myapp/app1", the application identity would + be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :type application_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ApplicationEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ApplicationEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_application_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'} # type: ignore + + def get_applications_event_list( + self, + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ApplicationEvent"] + """Gets all Applications-related events. + + The response is list of ApplicationEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. 
+ :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ApplicationEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ApplicationEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_applications_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'} # type: ignore + + def get_service_event_list( + self, + service_id, # type: str + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ServiceEvent"] + """Gets a Service-related events. + + The response is list of ServiceEvent objects. + + :param service_id: The identity of the service. This ID is typically the full name of the + service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be + "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. + :type service_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
+ :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ServiceEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ServiceEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_service_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') 
+ if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ServiceEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'} # type: ignore + + def get_services_event_list( + self, + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: 
Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ServiceEvent"] + """Gets all Services-related events. + + The response is list of ServiceEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ServiceEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ServiceEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_services_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ServiceEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} # type: ignore + + def get_partition_event_list( + self, + partition_id, # type: str + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.PartitionEvent"] + """Gets a Partition-related events. + + The response is list of PartitionEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of PartitionEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.PartitionEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is 
not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[PartitionEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} # type: ignore + + def get_partitions_event_list( + self, + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.PartitionEvent"] + """Gets all Partitions-related events. + + The response is list of PartitionEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of PartitionEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.PartitionEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partitions_event_list.metadata['url'] # type: ignore + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + 
query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[PartitionEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} # type: ignore + + def get_partition_replica_event_list( + self, + partition_id, # type: str + replica_id, # type: str + start_time_utc, # type: str + end_time_utc, # type: str + timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ReplicaEvent"] + """Gets a Partition Replica-related events. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
+ :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. + :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ReplicaEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ReplicaEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_replica_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: 
Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} # type: ignore + + def get_partition_replicas_event_list( + self, + partition_id, # type: str + start_time_utc, # type: str + end_time_utc, # type: str + 
timeout=60, # type: Optional[int] + events_types_filter=None, # type: Optional[str] + exclude_analysis_events=None, # type: Optional[bool] + skip_correlation_lookup=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> List["_models.ReplicaEvent"] + """Gets all Replicas-related events for a Partition. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string specifying the types of + FabricEvents that should only be included in the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is + passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information + if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field + in every FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of ReplicaEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.ReplicaEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_partition_replicas_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} # type: ignore + + def get_correlated_event_list( + self, + event_instance_id, # type: str + timeout=60, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List["_models.FabricEvent"] + """Gets all correlated events for a given event. + + The response is list of FabricEvents. + + :param event_instance_id: The EventInstanceId. + :type event_instance_id: str + :param timeout: The server timeout for performing the operation in seconds. This timeout + specifies the time duration that the client is willing to wait for the requested operation to + complete. The default value for this parameter is 60 seconds. 
+ :type timeout: long + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of FabricEvent, or the result of cls(response) + :rtype: list[~azure.servicefabric.models.FabricEvent] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricEvent"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "8.0" + accept = "application/json" + + # Construct URL + url = self.get_correlated_event_list.metadata['url'] # type: ignore + path_format_arguments = { + 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.FabricError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('[FabricEvent]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_correlated_event_list.metadata = {'url': 
'/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file From 68bfd1616c1ab0482f12edaacf545af3d5c04d9c Mon Sep 17 00:00:00 2001 From: Azure SDK Bot Date: Mon, 3 May 2021 18:19:32 +0000 Subject: [PATCH 2/6] Packaging update of azure-servicefabric --- sdk/servicefabric/azure-servicefabric/README.md | 2 +- sdk/servicefabric/azure-servicefabric/setup.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/servicefabric/azure-servicefabric/README.md b/sdk/servicefabric/azure-servicefabric/README.md index 89cfbd8ea380..bfc167c87a82 100644 --- a/sdk/servicefabric/azure-servicefabric/README.md +++ b/sdk/servicefabric/azure-servicefabric/README.md @@ -17,7 +17,7 @@ For code examples, see [Service Fabric](https://docs.microsoft.com/python/api/ov If you encounter any bugs or have suggestions, please file an issue in the [Issues](https://github.com/Azure/azure-sdk-for-python/issues) -section of the project. +section of the project. 
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-servicefabric%2FREADME.png) diff --git a/sdk/servicefabric/azure-servicefabric/setup.py b/sdk/servicefabric/azure-servicefabric/setup.py index f7e4f79eb804..3ca4444ff502 100644 --- a/sdk/servicefabric/azure-servicefabric/setup.py +++ b/sdk/servicefabric/azure-servicefabric/setup.py @@ -70,6 +70,7 @@ 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'License :: OSI Approved :: MIT License', ], zip_safe=False, @@ -79,7 +80,7 @@ 'azure', ]), install_requires=[ - 'msrest>=0.5.0', + 'msrest>=0.6.21', 'azure-common~=1.1', ], extras_require={ From 5fceb9c6850ab055dc153eeb3d3e0941f96fe158 Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Mon, 3 May 2021 11:21:44 -0700 Subject: [PATCH 3/6] Add CHANGELOG entry. --- .../azure-servicefabric/CHANGELOG.md | 286 ++++++++++++++++++ 1 file changed, 286 insertions(+) diff --git a/sdk/servicefabric/azure-servicefabric/CHANGELOG.md b/sdk/servicefabric/azure-servicefabric/CHANGELOG.md index 9110e3ba59da..728b99540408 100644 --- a/sdk/servicefabric/azure-servicefabric/CHANGELOG.md +++ b/sdk/servicefabric/azure-servicefabric/CHANGELOG.md @@ -1,5 +1,291 @@ # Release History +## 8.0.0.0 (2021-05-03) + +**Features** + + - Model ServiceDescription has a new parameter tags_required_to_run + - Model ServiceDescription has a new parameter tags_required_to_place + - Model NodeInfo has a new parameter node_tags + - Model StatefulServiceDescription has a new parameter tags_required_to_run + - Model StatefulServiceDescription has a new parameter tags_required_to_place + - Model StatefulServiceDescription has a new parameter replica_lifecycle_description + - Model ApplicationInfo has a new parameter managed_application_identity + - Model ClusterHealthPolicy has a new parameter node_type_health_policy_map + - Model 
StatelessServiceDescription has a new parameter instance_lifecycle_description + - Model StatelessServiceDescription has a new parameter tags_required_to_run + - Model StatelessServiceDescription has a new parameter tags_required_to_place + - Model StatelessServiceDescription has a new parameter instance_restart_wait_duration_seconds + - Model ApplicationUpgradeDescription has a new parameter managed_application_identity + - Model StatelessServiceUpdateDescription has a new parameter tags_for_placement + - Model StatelessServiceUpdateDescription has a new parameter service_dns_name + - Model StatelessServiceUpdateDescription has a new parameter instance_restart_wait_duration_seconds + - Model StatelessServiceUpdateDescription has a new parameter instance_lifecycle_description + - Model StatelessServiceUpdateDescription has a new parameter tags_for_running + - Model ServiceUpdateDescription has a new parameter tags_for_placement + - Model ServiceUpdateDescription has a new parameter tags_for_running + - Model ServiceUpdateDescription has a new parameter service_dns_name + - Model StatefulServiceUpdateDescription has a new parameter replica_lifecycle_description + - Model StatefulServiceUpdateDescription has a new parameter tags_for_placement + - Model StatefulServiceUpdateDescription has a new parameter tags_for_running + - Model StatefulServiceUpdateDescription has a new parameter service_dns_name + - Added operation ServiceFabricClientAPIsOperationsMixin.add_node_tags + - Added operation ServiceFabricClientAPIsOperationsMixin.get_loaded_partition_info_list + - Added operation ServiceFabricClientAPIsOperationsMixin.remove_node_tags + - Added operation ServiceFabricClientAPIsOperationsMixin.move_instance + +**Breaking changes** + + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_backup_list has a new signature + - Operation 
ServiceFabricClientAPIsOperationsMixin.get_application_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_backup_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list_by_name has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_health_using_policy has a new signature + - Operation MeshApplicationOperations.create_or_update has a new signature + - Operation MeshApplicationOperations.delete has a new signature + - Operation MeshApplicationOperations.get has a new signature + - Operation MeshApplicationOperations.get_upgrade_progress has a new signature + - Operation MeshCodePackageOperations.get_container_logs has a new signature + - Operation MeshGatewayOperations.create_or_update has a new signature + - Operation MeshGatewayOperations.delete has a new signature + - Operation MeshGatewayOperations.get has a new signature + - Operation MeshNetworkOperations.create_or_update has a new signature + - Operation MeshNetworkOperations.delete has a new signature + - Operation MeshNetworkOperations.get has a new signature + - Operation MeshSecretOperations.create_or_update has a new signature + - Operation MeshSecretOperations.delete has a new signature + - Operation MeshSecretOperations.get has a new signature + - Operation MeshSecretValueOperations.add_value has a new signature + - Operation MeshSecretValueOperations.delete has a new signature + - Operation MeshSecretValueOperations.get has a new signature + - Operation MeshSecretValueOperations.list has a new signature + - Operation MeshSecretValueOperations.show has a new signature + - Operation MeshServiceOperations.get has a new signature + - Operation 
MeshServiceOperations.list has a new signature + - Operation MeshServiceReplicaOperations.get has a new signature + - Operation MeshServiceReplicaOperations.list has a new signature + - Operation MeshVolumeOperations.create_or_update has a new signature + - Operation MeshVolumeOperations.delete has a new signature + - Operation MeshVolumeOperations.get has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.add_configuration_parameter_overrides has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.backup_partition has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.cancel_operation has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.cancel_repair_task has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.commit_image_store_upload_session has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.copy_image_store_content has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_application has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_compose_deployment has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_name has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_repair_task has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_service has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_service_from_template has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_application has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_backup_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_image_store_content has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_image_store_upload_session has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_name has a new signature 
+ - Operation ServiceFabricClientAPIsOperationsMixin.delete_property has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_repair_task has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.delete_service has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.deploy_service_package_to_node has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.disable_application_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.disable_node has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.disable_partition_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.disable_service_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.enable_application_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.enable_node has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.enable_partition_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.enable_service_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.force_approve_repair_task has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_aad_metadata has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_backup_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_load_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_manifest has a new 
signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_name_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list_by_name has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_applications_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_backup_policy_by_name has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_chaos has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_chaos_schedule has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_configuration has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_configuration_upgrade_status has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health_chunk has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_load has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_manifest has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_upgrade_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_version has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_compose_deployment_status has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_compose_deployment_upgrade_progress has a new signature + - 
Operation ServiceFabricClientAPIsOperationsMixin.get_configuration_overrides has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_container_logs_deployed_on_node has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_containers_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_correlated_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_data_loss_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_code_package_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_info_list_by_name has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_replica_detail_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_replica_detail_info_by_partition_id has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_replica_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_type_info_by_name has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_type_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_fault_operation_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_content has a new signature + - Operation 
ServiceFabricClientAPIsOperationsMixin.get_image_store_folder_size has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_root_content has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_root_folder_size has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_upload_session_by_id has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_upload_session_by_path has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_name_exists_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_load_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_transition_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_nodes_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_backup_configuration_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_backup_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_backup_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_load_information has a new signature + - Operation 
ServiceFabricClientAPIsOperationsMixin.get_partition_replica_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_replicas_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_restart_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_restore_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partitions_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_property_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_provisioned_fabric_code_version_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_provisioned_fabric_config_version_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_quorum_loss_progress has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_repair_task_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_backup_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_description has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_manifest has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_name_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_type_info_by_name has a new signature + - Operation 
ServiceFabricClientAPIsOperationsMixin.get_service_type_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_services_event_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_unplaced_replica_information has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_upgrade_orchestration_service_state has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.invoke_container_api has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.invoke_infrastructure_command has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.invoke_infrastructure_query has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.move_primary_replica has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.move_secondary_replica has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.post_chaos_schedule has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.provision_application_type has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.provision_cluster has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.put_property has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.recover_all_partitions has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.recover_partition has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.recover_service_partitions has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.recover_system_partitions has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.remove_compose_deployment has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.remove_configuration_overrides has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.remove_node_state has a new signature + - Operation 
ServiceFabricClientAPIsOperationsMixin.remove_replica has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_application_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_cluster_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_deployed_application_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_deployed_service_package_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_node_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_partition_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_replica_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.report_service_health has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.reset_partition_load has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.resolve_service has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.restart_deployed_code_package has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.restart_replica has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.restore_partition has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.resume_application_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.resume_application_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.resume_cluster_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.resume_partition_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.resume_service_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.rollback_application_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.rollback_cluster_upgrade 
has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.set_upgrade_orchestration_service_state has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_application_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_chaos has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_cluster_configuration_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_cluster_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_compose_deployment_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_data_loss has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_node_transition has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_partition_restart has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_quorum_loss has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.start_rollback_compose_deployment_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.stop_chaos has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.submit_property_batch has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.suspend_application_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.suspend_partition_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.suspend_service_backup has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.toggle_verbose_service_placement_health_reporting has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.unprovision_application_type has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.unprovision_cluster has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_application_upgrade has a new 
signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_cluster_upgrade has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_repair_execution_state has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_repair_task_health_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_service has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.upload_file has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.upload_file_chunk has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_health_using_policy has a new signature + - Operation MeshApplicationOperations.list has a new signature + - Operation MeshSecretOperations.list has a new signature + - Operation MeshNetworkOperations.list has a new signature + - Operation MeshVolumeOperations.list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_sub_name_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_backup_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_backups_from_backup_location has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_backup_configuration_info has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.restart_node has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_application_backup_configuration_info has a new signature + - Operation 
ServiceFabricClientAPIsOperationsMixin.get_deployed_application_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_backup_policy_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_all_entities_backed_up_by_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.create_backup_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health_chunk_using_policy_and_advanced_filters has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_property_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_compose_deployment_status_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_service_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_info_list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_partition_load has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_node_health_using_policy has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.get_chaos_events has a new signature + - Operation MeshGatewayOperations.list has a new signature + - Model AverageServiceLoadScalingTrigger has a new required parameter use_only_primary_load + ## 7.2.0.46 (2020-10-29) **Features** From 25663a0ab9d12ab994bdc6c81c1efa09116bed93 Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Mon, 3 May 2021 11:41:13 -0700 Subject: [PATCH 4/6] Regenerate with track 1 settings. 
--- .../azure/servicefabric/__init__.py | 20 +- .../azure/servicefabric/_configuration.py | 73 +- .../_service_fabric_client_ap_is.py | 88 + .../_service_fabric_client_apis.py | 123 - .../azure/servicefabric/aio/__init__.py | 10 - .../azure/servicefabric/aio/_configuration.py | 62 - .../aio/_service_fabric_client_apis.py | 116 - .../servicefabric/aio/operations/__init__.py | 31 - .../_mesh_application_operations.py | 329 - .../_mesh_code_package_operations.py | 114 - .../operations/_mesh_gateway_operations.py | 271 - .../operations/_mesh_network_operations.py | 276 - .../aio/operations/_mesh_secret_operations.py | 276 - .../_mesh_secret_value_operations.py | 360 - .../operations/_mesh_service_operations.py | 158 - .../_mesh_service_replica_operations.py | 166 - .../aio/operations/_mesh_volume_operations.py | 271 - .../_service_fabric_client_apis_operations.py | 16649 ------------ .../azure/servicefabric/models/__init__.py | 1274 +- .../azure/servicefabric/models/_models.py | 20031 +++++++------- .../azure/servicefabric/models/_models_py3.py | 21615 ++++++---------- .../_service_fabric_client_ap_is_enums.py | 1094 + .../_service_fabric_client_apis_enums.py | 2092 -- .../servicefabric/operations/__init__.py | 11 +- .../_mesh_application_operations.py | 369 +- .../_mesh_code_package_operations.py | 106 +- .../operations/_mesh_gateway_operations.py | 308 +- .../operations/_mesh_network_operations.py | 310 +- .../operations/_mesh_secret_operations.py | 303 +- .../_mesh_secret_value_operations.py | 402 +- .../operations/_mesh_service_operations.py | 169 +- .../_mesh_service_replica_operations.py | 167 +- .../operations/_mesh_volume_operations.py | 303 +- ..._service_fabric_client_ap_is_operations.py | 16732 ++++++++++++ .../_service_fabric_client_apis_operations.py | 16866 ------------ .../azure/servicefabric/py.typed | 1 - .../servicefabric/{_version.py => version.py} | 8 +- 37 files changed, 37551 insertions(+), 64003 deletions(-) create mode 100644 
sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_application_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_code_package_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_gateway_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_network_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_service_fabric_client_apis_operations.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py delete mode 100644 
sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py create mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py delete mode 100644 sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed rename sdk/servicefabric/azure-servicefabric/azure/servicefabric/{_version.py => version.py} (88%) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py index 86d9a940f45c..4fb9457e85d0 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/__init__.py @@ -1,19 +1,19 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
# -------------------------------------------------------------------------- -from ._service_fabric_client_apis import ServiceFabricClientAPIs -from ._version import VERSION +from ._configuration import ServiceFabricClientAPIsConfiguration +from ._service_fabric_client_ap_is import ServiceFabricClientAPIs +__all__ = ['ServiceFabricClientAPIs', 'ServiceFabricClientAPIsConfiguration'] + +from .version import VERSION __version__ = VERSION -__all__ = ['ServiceFabricClientAPIs'] -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py index 56bf252d290d..f742cea0cdb9 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_configuration.py @@ -1,66 +1,43 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING +from msrest import Configuration -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ._version import VERSION - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.credentials import TokenCredential +from .version import VERSION class ServiceFabricClientAPIsConfiguration(Configuration): - """Configuration for ServiceFabricClientAPIs. - + """Configuration for ServiceFabricClientAPIs Note that all parameters used to create this instance are saved as instance attributes. - :param credential: Credential needed for the client to connect to Azure. - :type credential: ~azure.core.credentials.TokenCredential + :param credentials: Subscription credentials which uniquely identify + client subscription. + :type credentials: None + :param str base_url: Service URL """ def __init__( - self, - credential, # type: "TokenCredential" - **kwargs # type: Any - ): - # type: (...) -> None - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - super(ServiceFabricClientAPIsConfiguration, self).__init__(**kwargs) + self, credentials, base_url=None): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if not base_url: + base_url = 'http://localhost:19080' + + super(ServiceFabricClientAPIsConfiguration, self).__init__(base_url) + + # Starting Autorest.Python 4.0.64, make connection pool activated by default + self.keep_alive = True - self.credential = credential - self.api_version = "8.0" - self.credential_scopes = kwargs.pop('credential_scopes', []) - kwargs.setdefault('sdk_moniker', 'servicefabric/{}'.format(VERSION)) - self._configure(**kwargs) + self.add_user_agent('azure-servicefabric/{}'.format(VERSION)) - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') - if not self.credential_scopes and not self.authentication_policy: - raise ValueError("You must provide either credential_scopes or authentication_policy as kwargs") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + self.credentials = credentials diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py new file mode 100644 index 000000000000..4e36febb3b03 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer + +from ._configuration import ServiceFabricClientAPIsConfiguration +from .operations import ServiceFabricClientAPIsOperationsMixin +from .operations import MeshSecretOperations +from .operations import MeshSecretValueOperations +from .operations import MeshVolumeOperations +from .operations import MeshNetworkOperations +from .operations import MeshApplicationOperations +from .operations import MeshServiceOperations +from .operations import MeshCodePackageOperations +from .operations import MeshServiceReplicaOperations +from .operations import MeshGatewayOperations +from . import models + + +class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin, SDKClient): + """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. + + :ivar config: Configuration for client. 
+ :vartype config: ServiceFabricClientAPIsConfiguration + + :ivar mesh_secret: MeshSecret operations + :vartype mesh_secret: azure.servicefabric.operations.MeshSecretOperations + :ivar mesh_secret_value: MeshSecretValue operations + :vartype mesh_secret_value: azure.servicefabric.operations.MeshSecretValueOperations + :ivar mesh_volume: MeshVolume operations + :vartype mesh_volume: azure.servicefabric.operations.MeshVolumeOperations + :ivar mesh_network: MeshNetwork operations + :vartype mesh_network: azure.servicefabric.operations.MeshNetworkOperations + :ivar mesh_application: MeshApplication operations + :vartype mesh_application: azure.servicefabric.operations.MeshApplicationOperations + :ivar mesh_service: MeshService operations + :vartype mesh_service: azure.servicefabric.operations.MeshServiceOperations + :ivar mesh_code_package: MeshCodePackage operations + :vartype mesh_code_package: azure.servicefabric.operations.MeshCodePackageOperations + :ivar mesh_service_replica: MeshServiceReplica operations + :vartype mesh_service_replica: azure.servicefabric.operations.MeshServiceReplicaOperations + :ivar mesh_gateway: MeshGateway operations + :vartype mesh_gateway: azure.servicefabric.operations.MeshGatewayOperations + + :param credentials: Subscription credentials which uniquely identify + client subscription. 
+ :type credentials: None + :param str base_url: Service URL + """ + + def __init__( + self, credentials, base_url=None): + + self.config = ServiceFabricClientAPIsConfiguration(credentials, base_url) + super(ServiceFabricClientAPIs, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '8.0' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.mesh_secret = MeshSecretOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_secret_value = MeshSecretValueOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_volume = MeshVolumeOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_network = MeshNetworkOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_application = MeshApplicationOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_service = MeshServiceOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_code_package = MeshCodePackageOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_service_replica = MeshServiceReplicaOperations( + self._client, self.config, self._serialize, self._deserialize) + self.mesh_gateway = MeshGatewayOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py deleted file mode 100644 index d0d6922f36f3..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_apis.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# 
Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Optional - - from azure.core.credentials import TokenCredential - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import ServiceFabricClientAPIsConfiguration -from .operations import ServiceFabricClientAPIsOperationsMixin -from .operations import MeshSecretOperations -from .operations import MeshSecretValueOperations -from .operations import MeshVolumeOperations -from .operations import MeshNetworkOperations -from .operations import MeshApplicationOperations -from .operations import MeshServiceOperations -from .operations import MeshCodePackageOperations -from .operations import MeshServiceReplicaOperations -from .operations import MeshGatewayOperations -from . import models - - -class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin): - """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. 
- - :ivar mesh_secret: MeshSecretOperations operations - :vartype mesh_secret: azure.servicefabric.operations.MeshSecretOperations - :ivar mesh_secret_value: MeshSecretValueOperations operations - :vartype mesh_secret_value: azure.servicefabric.operations.MeshSecretValueOperations - :ivar mesh_volume: MeshVolumeOperations operations - :vartype mesh_volume: azure.servicefabric.operations.MeshVolumeOperations - :ivar mesh_network: MeshNetworkOperations operations - :vartype mesh_network: azure.servicefabric.operations.MeshNetworkOperations - :ivar mesh_application: MeshApplicationOperations operations - :vartype mesh_application: azure.servicefabric.operations.MeshApplicationOperations - :ivar mesh_service: MeshServiceOperations operations - :vartype mesh_service: azure.servicefabric.operations.MeshServiceOperations - :ivar mesh_code_package: MeshCodePackageOperations operations - :vartype mesh_code_package: azure.servicefabric.operations.MeshCodePackageOperations - :ivar mesh_service_replica: MeshServiceReplicaOperations operations - :vartype mesh_service_replica: azure.servicefabric.operations.MeshServiceReplicaOperations - :ivar mesh_gateway: MeshGatewayOperations operations - :vartype mesh_gateway: azure.servicefabric.operations.MeshGatewayOperations - :param credential: Credential needed for the client to connect to Azure. - :type credential: ~azure.core.credentials.TokenCredential - :param str base_url: Service URL - """ - - def __init__( - self, - credential, # type: "TokenCredential" - base_url=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> None - if not base_url: - base_url = 'http://localhost:19080/' - self._config = ServiceFabricClientAPIsConfiguration(credential, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.mesh_secret = MeshSecretOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_secret_value = MeshSecretValueOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_volume = MeshVolumeOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_network = MeshNetworkOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_application = MeshApplicationOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_service = MeshServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_code_package = MeshCodePackageOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_service_replica = MeshServiceReplicaOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_gateway = MeshGatewayOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - http_request.url = self._client.format_url(http_request.url) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> ServiceFabricClientAPIs - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py deleted file mode 100644 index d91c5f9455ba..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_fabric_client_apis import ServiceFabricClientAPIs -__all__ = ['ServiceFabricClientAPIs'] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py deleted file mode 100644 index cbb3364c125c..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_configuration.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any, TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .._version import VERSION - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - - -class ServiceFabricClientAPIsConfiguration(Configuration): - """Configuration for ServiceFabricClientAPIs. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param credential: Credential needed for the client to connect to Azure. - :type credential: ~azure.core.credentials_async.AsyncTokenCredential - """ - - def __init__( - self, - credential: "AsyncTokenCredential", - **kwargs: Any - ) -> None: - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - super(ServiceFabricClientAPIsConfiguration, self).__init__(**kwargs) - - self.credential = credential - self.api_version = "8.0" - self.credential_scopes = kwargs.pop('credential_scopes', []) - kwargs.setdefault('sdk_moniker', 'servicefabric/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - 
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') - if not self.credential_scopes and not self.authentication_policy: - raise ValueError("You must provide either credential_scopes or authentication_policy as kwargs") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py deleted file mode 100644 index 1614ad668b47..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/_service_fabric_client_apis.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from typing import Any, Optional, TYPE_CHECKING - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - -from ._configuration import ServiceFabricClientAPIsConfiguration -from .operations import ServiceFabricClientAPIsOperationsMixin -from .operations import MeshSecretOperations -from .operations import MeshSecretValueOperations -from .operations import MeshVolumeOperations -from .operations import MeshNetworkOperations -from .operations import MeshApplicationOperations -from .operations import MeshServiceOperations -from .operations import MeshCodePackageOperations -from .operations import MeshServiceReplicaOperations -from .operations import MeshGatewayOperations -from .. import models - - -class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin): - """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. 
- - :ivar mesh_secret: MeshSecretOperations operations - :vartype mesh_secret: azure.servicefabric.aio.operations.MeshSecretOperations - :ivar mesh_secret_value: MeshSecretValueOperations operations - :vartype mesh_secret_value: azure.servicefabric.aio.operations.MeshSecretValueOperations - :ivar mesh_volume: MeshVolumeOperations operations - :vartype mesh_volume: azure.servicefabric.aio.operations.MeshVolumeOperations - :ivar mesh_network: MeshNetworkOperations operations - :vartype mesh_network: azure.servicefabric.aio.operations.MeshNetworkOperations - :ivar mesh_application: MeshApplicationOperations operations - :vartype mesh_application: azure.servicefabric.aio.operations.MeshApplicationOperations - :ivar mesh_service: MeshServiceOperations operations - :vartype mesh_service: azure.servicefabric.aio.operations.MeshServiceOperations - :ivar mesh_code_package: MeshCodePackageOperations operations - :vartype mesh_code_package: azure.servicefabric.aio.operations.MeshCodePackageOperations - :ivar mesh_service_replica: MeshServiceReplicaOperations operations - :vartype mesh_service_replica: azure.servicefabric.aio.operations.MeshServiceReplicaOperations - :ivar mesh_gateway: MeshGatewayOperations operations - :vartype mesh_gateway: azure.servicefabric.aio.operations.MeshGatewayOperations - :param credential: Credential needed for the client to connect to Azure. 
- :type credential: ~azure.core.credentials_async.AsyncTokenCredential - :param str base_url: Service URL - """ - - def __init__( - self, - credential: "AsyncTokenCredential", - base_url: Optional[str] = None, - **kwargs: Any - ) -> None: - if not base_url: - base_url = 'http://localhost:19080/' - self._config = ServiceFabricClientAPIsConfiguration(credential, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.mesh_secret = MeshSecretOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_secret_value = MeshSecretValueOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_volume = MeshVolumeOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_network = MeshNetworkOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_application = MeshApplicationOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_service = MeshServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_code_package = MeshCodePackageOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_service_replica = MeshServiceReplicaOperations( - self._client, self._config, self._serialize, self._deserialize) - self.mesh_gateway = MeshGatewayOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. 
- :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - http_request.url = self._client.format_url(http_request.url) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "ServiceFabricClientAPIs": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py deleted file mode 100644 index df6b66c53161..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._service_fabric_client_apis_operations import ServiceFabricClientAPIsOperationsMixin -from ._mesh_secret_operations import MeshSecretOperations -from ._mesh_secret_value_operations import MeshSecretValueOperations -from ._mesh_volume_operations import MeshVolumeOperations -from ._mesh_network_operations import MeshNetworkOperations -from ._mesh_application_operations import MeshApplicationOperations -from ._mesh_service_operations import MeshServiceOperations -from ._mesh_code_package_operations import MeshCodePackageOperations -from ._mesh_service_replica_operations import MeshServiceReplicaOperations -from ._mesh_gateway_operations import MeshGatewayOperations - -__all__ = [ - 'ServiceFabricClientAPIsOperationsMixin', - 'MeshSecretOperations', - 'MeshSecretValueOperations', - 'MeshVolumeOperations', - 'MeshNetworkOperations', - 'MeshApplicationOperations', - 'MeshServiceOperations', - 'MeshCodePackageOperations', - 'MeshServiceReplicaOperations', - 'MeshGatewayOperations', -] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_application_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_application_operations.py deleted file mode 100644 index d80172e123a0..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_application_operations.py +++ /dev/null @@ -1,329 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshApplicationOperations: - """MeshApplicationOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create_or_update( - self, - application_resource_name: str, - application_resource_description: "_models.ApplicationResourceDescription", - **kwargs - ) -> Optional["_models.ApplicationResourceDescription"]: - """Creates or updates a Application resource. - - Creates a Application resource with the specified name, description and properties. If - Application resource with the same name exists, then it is updated with the specified - description and properties. - - :param application_resource_name: The identity of the application. 
- :type application_resource_name: str - :param application_resource_description: Description for creating a Application resource. - :type application_resource_description: ~azure.servicefabric.models.ApplicationResourceDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_resource_description, 'ApplicationResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) - - if response.status_code == 201: - deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - create_or_update.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore - - async def get( - self, - application_resource_name: str, - **kwargs - ) -> "_models.ApplicationResourceDescription": - """Gets the Application resource with the given name. - - Gets the information about the Application resource with the given name. The information - include the description and other properties of the Application. - - :param application_resource_name: The identity of the application. 
- :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore - - async def 
delete( - self, - application_resource_name: str, - **kwargs - ) -> None: - """Deletes the Application resource. - - Deletes the Application resource identified by the name. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # 
type: ignore - - async def list( - self, - **kwargs - ) -> "_models.PagedApplicationResourceDescriptionList": - """Lists all the application resources. - - Gets the information about all application resources in a given resource group. The information - include the description and other properties of the Application. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedApplicationResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedApplicationResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedApplicationResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = 
{'url': '/Resources/Applications'} # type: ignore - - async def get_upgrade_progress( - self, - application_resource_name: str, - **kwargs - ) -> "_models.ApplicationResourceUpgradeProgressInfo": - """Gets the progress of the latest upgrade performed on this application resource. - - Gets the upgrade progress information about the Application resource with the given name. The - information include percentage of completion and other upgrade state information of the - Application resource. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationResourceUpgradeProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationResourceUpgradeProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceUpgradeProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_upgrade_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationResourceUpgradeProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_upgrade_progress.metadata = {'url': '/Resources/Applications/{applicationResourceName}/$/GetUpgradeProgress'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_code_package_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_code_package_operations.py deleted file mode 100644 index e0b901a23f4a..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_code_package_operations.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... 
import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshCodePackageOperations: - """MeshCodePackageOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def get_container_logs( - self, - application_resource_name: str, - service_resource_name: str, - replica_name: str, - code_package_name: str, - tail: Optional[str] = None, - **kwargs - ) -> "_models.ContainerLogs": - """Gets the logs from the container. - - Gets the logs for the container of the specified code package of the service replica. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :param service_resource_name: The identity of the service. - :type service_resource_name: str - :param replica_name: Service Fabric replica name. - :type replica_name: str - :param code_package_name: The name of code package of the service. - :type code_package_name: str - :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show - the complete logs. 
- :type tail: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ContainerLogs, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ContainerLogs - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_container_logs.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), - 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), - 'codePackageName': self._serialize.url("code_package_name", code_package_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if tail is not None: - query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ContainerLogs', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_container_logs.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}/CodePackages/{codePackageName}/Logs'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_gateway_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_gateway_operations.py deleted file mode 100644 index 33301f9ff97e..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_gateway_operations.py +++ /dev/null @@ -1,271 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshGatewayOperations: - """MeshGatewayOperations async operations. - - You should not instantiate this class directly. 
Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create_or_update( - self, - gateway_resource_name: str, - gateway_resource_description: "_models.GatewayResourceDescription", - **kwargs - ) -> Optional["_models.GatewayResourceDescription"]: - """Creates or updates a Gateway resource. - - Creates a Gateway resource with the specified name, description and properties. If Gateway - resource with the same name exists, then it is updated with the specified description and - properties. Use Gateway resource to provide public connectivity to application services. - - :param gateway_resource_name: The identity of the gateway. - :type gateway_resource_name: str - :param gateway_resource_description: Description for creating a Gateway resource. 
- :type gateway_resource_description: ~azure.servicefabric.models.GatewayResourceDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: GatewayResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.GatewayResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GatewayResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore - path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(gateway_resource_description, 'GatewayResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) - - if response.status_code == 201: - deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - create_or_update.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore - - async def get( - self, - gateway_resource_name: str, - **kwargs - ) -> "_models.GatewayResourceDescription": - """Gets the Gateway resource with the given name. - - Gets the information about the Gateway resource with the given name. The information include - the description and other properties of the Gateway. - - :param gateway_resource_name: The identity of the gateway. 
- :type gateway_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: GatewayResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.GatewayResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewayResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore - - async def delete( - self, - gateway_resource_name: str, 
- **kwargs - ) -> None: - """Deletes the Gateway resource. - - Deletes the Gateway resource identified by the name. - - :param gateway_resource_name: The identity of the gateway. - :type gateway_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore - - async def list( - self, - **kwargs - ) -> 
"_models.PagedGatewayResourceDescriptionList": - """Lists all the gateway resources. - - Gets the information about all gateway resources in a given resource group. The information - include the description and other properties of the Gateway. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedGatewayResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedGatewayResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedGatewayResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedGatewayResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Gateways'} # type: ignore diff --git 
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_network_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_network_operations.py deleted file mode 100644 index 9a6f078c9322..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_network_operations.py +++ /dev/null @@ -1,276 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshNetworkOperations: - """MeshNetworkOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create_or_update( - self, - network_resource_name: str, - name: str, - properties: "_models.NetworkResourceProperties", - **kwargs - ) -> Optional["_models.NetworkResourceDescription"]: - """Creates or updates a Network resource. - - Creates a Network resource with the specified name, description and properties. If Network - resource with the same name exists, then it is updated with the specified description and - properties. Network resource provides connectivity between application services. - - :param network_resource_name: The identity of the network. - :type network_resource_name: str - :param name: Name of the Network resource. - :type name: str - :param properties: Describes properties of a network resource. - :type properties: ~azure.servicefabric.models.NetworkResourceProperties - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NetworkResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NetworkResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NetworkResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _network_resource_description = _models.NetworkResourceDescription(name=name, properties=properties) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore - path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 
'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_network_resource_description, 'NetworkResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) - - if response.status_code == 201: - deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - create_or_update.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore - - async def get( - self, - network_resource_name: str, - **kwargs - ) -> "_models.NetworkResourceDescription": - """Gets the Network resource with the given name. - - Gets the information about the Network resource with the given name. The information include - the description and other properties of the Network. 
- - :param network_resource_name: The identity of the network. - :type network_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NetworkResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NetworkResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: 
ignore - - async def delete( - self, - network_resource_name: str, - **kwargs - ) -> None: - """Deletes the Network resource. - - Deletes the Network resource identified by the name. - - :param network_resource_name: The identity of the network. - :type network_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore - - async def 
list( - self, - **kwargs - ) -> "_models.PagedNetworkResourceDescriptionList": - """Lists all the network resources. - - Gets the information about all network resources in a given resource group. The information - include the description and other properties of the Network. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedNetworkResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedNetworkResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedNetworkResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedNetworkResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Networks'} # type: ignore diff --git 
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_operations.py deleted file mode 100644 index b7cf3f0fe22b..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_operations.py +++ /dev/null @@ -1,276 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshSecretOperations: - """MeshSecretOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create_or_update( - self, - secret_resource_name: str, - properties: "_models.SecretResourceProperties", - name: str, - **kwargs - ) -> Optional["_models.SecretResourceDescription"]: - """Creates or updates a Secret resource. - - Creates a Secret resource with the specified name, description and properties. If Secret - resource with the same name exists, then it is updated with the specified description and - properties. Once created, the kind and contentType of a secret resource cannot be updated. - - :param secret_resource_name: The name of the secret resource. - :type secret_resource_name: str - :param properties: Describes the properties of a secret resource. - :type properties: ~azure.servicefabric.models.SecretResourceProperties - :param name: Name of the Secret resource. 
- :type name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _secret_resource_description = _models.SecretResourceDescription(properties=properties, name=name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_secret_resource_description, 'SecretResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SecretResourceDescription', pipeline_response) - - if response.status_code == 201: - deserialized = self._deserialize('SecretResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - create_or_update.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore - - async def get( - self, - secret_resource_name: str, - **kwargs - ) -> "_models.SecretResourceDescription": - """Gets the Secret resource with the given name. - - Gets the information about the Secret resource with the given name. The information include the - description and other properties of the Secret. - - :param secret_resource_name: The name of the secret resource. 
- :type secret_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('SecretResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore - - async def delete( - self, - secret_resource_name: str, - **kwargs 
- ) -> None: - """Deletes the Secret resource. - - Deletes the specified Secret resource and all of its named values. - - :param secret_resource_name: The name of the secret resource. - :type secret_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore - - async def list( - self, - **kwargs - ) -> 
"_models.PagedSecretResourceDescriptionList": - """Lists all the secret resources. - - Gets the information about all secret resources in a given resource group. The information - include the description and other properties of the Secret. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedSecretResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedSecretResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedSecretResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Secrets'} # type: ignore diff --git 
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py deleted file mode 100644 index 1d8a0306c6fe..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_secret_value_operations.py +++ /dev/null @@ -1,360 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshSecretValueOperations: - """MeshSecretValueOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def add_value( - self, - secret_resource_name: str, - secret_value_resource_name: str, - name: str, - value: Optional[str] = None, - **kwargs - ) -> Optional["_models.SecretValueResourceDescription"]: - """Adds the specified value as a new version of the specified secret resource. - - Creates a new value of the specified secret resource. The name of the value is typically the - version identifier. Once created the value cannot be changed. - - :param secret_resource_name: The name of the secret resource. - :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. - :type secret_value_resource_name: str - :param name: Version identifier of the secret value. - :type name: str - :param value: The actual value of the secret. 
- :type value: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretValueResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretValueResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _secret_value_resource_description = _models.SecretValueResourceDescription(name=name, value=value) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.add_value.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_secret_value_resource_description, 'SecretValueResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) - - if response.status_code == 201: - deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - add_value.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore - - async def get( - self, - secret_resource_name: str, - secret_value_resource_name: str, - **kwargs - ) -> "_models.SecretValueResourceDescription": - """Gets the specified secret value resource. - - Get the information about the specified named secret value resources. The information does not - include the actual value of the secret. - - :param secret_resource_name: The name of the secret resource. - :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. 
- :type secret_value_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretValueResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretValueResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValueResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return 
deserialized - get.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore - - async def delete( - self, - secret_resource_name: str, - secret_value_resource_name: str, - **kwargs - ) -> None: - """Deletes the specified value of the named secret resource. - - Deletes the secret value resource identified by the name. The name of the resource is typically - the version associated with that value. Deletion will fail if the specified value is in use. - - :param secret_resource_name: The name of the secret resource. - :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. - :type secret_value_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore - - async def list( - self, - secret_resource_name: str, - **kwargs - ) -> "_models.PagedSecretValueResourceDescriptionList": - """List names of all values of the specified secret resource. - - Gets information about all secret value resources of the specified secret resource. The - information includes the names of the secret value resources, but not the actual values. - - :param secret_resource_name: The name of the secret resource. 
- :type secret_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedSecretValueResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedSecretValueResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretValueResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedSecretValueResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values'} # type: ignore - - 
async def show( - self, - secret_resource_name: str, - secret_value_resource_name: str, - **kwargs - ) -> "_models.SecretValue": - """Lists the specified value of the secret resource. - - Lists the decrypted value of the specified named value of the secret resource. This is a - privileged operation. - - :param secret_resource_name: The name of the secret resource. - :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. - :type secret_value_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretValue, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretValue - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValue"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.show.metadata['url'] # type: ignore - path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('SecretValue', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - show.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}/list_value'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py deleted file mode 100644 index 963b70b09c2d..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_operations.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... 
import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshServiceOperations: - """MeshServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def get( - self, - application_resource_name: str, - service_resource_name: str, - **kwargs - ) -> "_models.ServiceResourceDescription": - """Gets the Service resource with the given name. - - Gets the information about the Service resource with the given name. The information include - the description and other properties of the Service. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :param service_resource_name: The identity of the service. 
- :type service_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata 
= {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}'} # type: ignore - - async def list( - self, - application_resource_name: str, - **kwargs - ) -> "_models.PagedServiceResourceDescriptionList": - """Lists all the service resources. - - Gets the information about all services of an application resource. The information include the - description and other properties of the Service. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServiceResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedServiceResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: 
- map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedServiceResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py deleted file mode 100644 index 0e7ef70ae4d7..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_service_replica_operations.py +++ /dev/null @@ -1,166 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... 
import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshServiceReplicaOperations: - """MeshServiceReplicaOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def get( - self, - application_resource_name: str, - service_resource_name: str, - replica_name: str, - **kwargs - ) -> "_models.ServiceReplicaDescription": - """Gets the given replica of the service of an application. - - Gets the information about the service replica with the given name. The information include the - description and other properties of the service replica. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :param service_resource_name: The identity of the service. - :type service_resource_name: str - :param replica_name: Service Fabric replica name. 
- :type replica_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceReplicaDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceReplicaDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceReplicaDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), - 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceReplicaDescription', pipeline_response) - - if cls: - return 
cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}'} # type: ignore - - async def list( - self, - application_resource_name: str, - service_resource_name: str, - **kwargs - ) -> "_models.PagedServiceReplicaDescriptionList": - """Lists all the replicas of a service. - - Gets the information about all replicas of a service. The information include the description - and other properties of the service replica. - - :param application_resource_name: The identity of the application. - :type application_resource_name: str - :param service_resource_name: The identity of the service. - :type service_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServiceReplicaDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedServiceReplicaDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceReplicaDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: 
Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedServiceReplicaDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py deleted file mode 100644 index e7e37d3ee33f..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_mesh_volume_operations.py +++ /dev/null @@ -1,271 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class MeshVolumeOperations: - """MeshVolumeOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create_or_update( - self, - volume_resource_name: str, - volume_resource_description: "_models.VolumeResourceDescription", - **kwargs - ) -> Optional["_models.VolumeResourceDescription"]: - """Creates or updates a Volume resource. - - Creates a Volume resource with the specified name, description and properties. If Volume - resource with the same name exists, then it is updated with the specified description and - properties. - - :param volume_resource_name: The identity of the volume. - :type volume_resource_name: str - :param volume_resource_description: Description for creating a Volume resource. 
- :type volume_resource_description: ~azure.servicefabric.models.VolumeResourceDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: VolumeResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.VolumeResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VolumeResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore - path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(volume_resource_description, 'VolumeResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) - - if response.status_code == 201: - deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - create_or_update.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore - - async def get( - self, - volume_resource_name: str, - **kwargs - ) -> "_models.VolumeResourceDescription": - """Gets the Volume resource with the given name. - - Gets the information about the Volume resource with the given name. The information include the - description and other properties of the Volume. - - :param volume_resource_name: The identity of the volume. - :type volume_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: VolumeResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.VolumeResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.VolumeResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get.metadata['url'] # type: ignore - path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = 
self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore - - async def delete( - self, - volume_resource_name: str, - **kwargs - ) -> None: - """Deletes the Volume resource. - - Deletes the Volume resource identified by the name. - - :param volume_resource_name: The identity of the volume. 
- :type volume_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore - - async def list( - self, - **kwargs - ) -> "_models.PagedVolumeResourceDescriptionList": - """Lists all the volume resources. - - Gets the information about all volume resources in a given resource group. 
The information - include the description and other properties of the Volume. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedVolumeResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedVolumeResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedVolumeResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedVolumeResourceDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - list.metadata = {'url': '/Resources/Volumes'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_service_fabric_client_apis_operations.py 
b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_service_fabric_client_apis_operations.py deleted file mode 100644 index 1577734abd5d..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/aio/operations/_service_fabric_client_apis_operations.py +++ /dev/null @@ -1,16649 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceFabricClientAPIsOperationsMixin: - - async def get_cluster_manifest( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ClusterManifest": - """Get the Service Fabric cluster manifest. - - Get the Service Fabric cluster manifest. The cluster manifest contains properties of the - cluster that include different node types on the cluster, - security configurations, fault, and upgrade domain topologies, etc. - - These properties are specified as part of the ClusterConfig.JSON file while deploying a - stand-alone cluster. 
However, most of the information in the cluster manifest - is generated internally by service fabric during cluster deployment in other deployment - scenarios (e.g. when using Azure portal). - - The contents of the cluster manifest are for informational purposes only and users are not - expected to take a dependency on the format of the file contents or its interpretation. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterManifest, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterManifest - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterManifest"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_manifest.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterManifest', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'} # type: ignore - - async def get_cluster_health( - self, - nodes_health_state_filter: Optional[int] = 0, - applications_health_state_filter: Optional[int] = 0, - events_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - include_system_application_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ClusterHealth": - """Gets the health of a Service Fabric cluster. - - Use EventsHealthStateFilter to filter the collection of health events reported on the cluster - based on the health state. - Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the - collection of nodes and applications returned based on their aggregated health state. - - :param nodes_health_state_filter: Allows filtering of the node health state objects returned in - the result of cluster health query - based on their health state. The possible values for this parameter include integer value of - one of the - following health states. Only nodes that match the filter are returned. All nodes are used to - evaluate the aggregated health state. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of nodes with HealthState value of - OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. 
- * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type nodes_health_state_filter: int - :param applications_health_state_filter: Allows filtering of the application health state - objects returned in the result of cluster health - query based on their health state. - The possible values for this parameter include integer value obtained from members or bitwise - operations - on members of HealthStateFilter enumeration. Only applications that match the filter are - returned. - All applications are used to evaluate the aggregated health state. If not specified, all - entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of applications with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
- :type applications_health_state_filter: int - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param include_system_application_health_statistics: Indicates whether the health statistics - should include the fabric:/System application health statistics. False by default. - If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the - entities that belong to the fabric:/System application. 
- Otherwise, the query result includes health statistics only for user applications. - The health statistics must be included in the query result for this parameter to be applied. - :type include_system_application_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_health.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if nodes_health_state_filter is not None: - query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') - if applications_health_state_filter is not None: - query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = 
self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if include_system_application_health_statistics is not None: - query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health.metadata = {'url': '/$/GetClusterHealth'} # type: ignore - - async def get_cluster_health_using_policy( - self, - nodes_health_state_filter: Optional[int] = 0, - applications_health_state_filter: Optional[int] = 0, - events_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - include_system_application_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - application_health_policy_map: Optional[List["_models.ApplicationHealthPolicyMapItem"]] = None, - cluster_health_policy: Optional["_models.ClusterHealthPolicy"] = None, - **kwargs - ) -> "_models.ClusterHealth": - """Gets the health of a Service Fabric cluster using the specified policy. 
- - Use EventsHealthStateFilter to filter the collection of health events reported on the cluster - based on the health state. - Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the - collection of nodes and applications returned based on their aggregated health state. - Use ClusterHealthPolicies to override the health policies used to evaluate the health. - - :param nodes_health_state_filter: Allows filtering of the node health state objects returned in - the result of cluster health query - based on their health state. The possible values for this parameter include integer value of - one of the - following health states. Only nodes that match the filter are returned. All nodes are used to - evaluate the aggregated health state. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of nodes with HealthState value of - OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type nodes_health_state_filter: int - :param applications_health_state_filter: Allows filtering of the application health state - objects returned in the result of cluster health - query based on their health state. 
- The possible values for this parameter include integer value obtained from members or bitwise - operations - on members of HealthStateFilter enumeration. Only applications that match the filter are - returned. - All applications are used to evaluate the aggregated health state. If not specified, all - entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of applications with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type applications_health_state_filter: int - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. 
- * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param include_system_application_health_statistics: Indicates whether the health statistics - should include the fabric:/System application health statistics. False by default. - If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the - entities that belong to the fabric:/System application. - Otherwise, the query result includes health statistics only for user applications. - The health statistics must be included in the query result for this parameter to be applied. - :type include_system_application_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy_map: Defines a map that contains specific application health - policies for different applications. - Each entry specifies as key the application name and as value an ApplicationHealthPolicy used - to evaluate the application health. 
- If an application is not specified in the map, the application health evaluation uses the - ApplicationHealthPolicy found in its application manifest or the default application health - policy (if no health policy is defined in the manifest). - The map is empty by default. - :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cluster_health_policies = _models.ClusterHealthPolicies(application_health_policy_map=application_health_policy_map, cluster_health_policy=cluster_health_policy) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_cluster_health_using_policy.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if nodes_health_state_filter is not None: - query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') - if applications_health_state_filter is not None: - query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') - if 
events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if include_system_application_health_statistics is not None: - query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _cluster_health_policies is not None: - body_content = self._serialize.body(_cluster_health_policies, 'ClusterHealthPolicies') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'} # type: ignore 
- - async def get_cluster_health_chunk( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ClusterHealthChunk": - """Gets the health of a Service Fabric cluster using health chunks. - - Gets the health of a Service Fabric cluster using health chunks. Includes the aggregated health - state of the cluster, but none of the cluster entities. - To expand the cluster health and get the health state of all or some of the entities, use the - POST URI and specify the cluster health chunk query description. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealthChunk, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealthChunk - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_health_chunk.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore - - async def get_cluster_health_chunk_using_policy_and_advanced_filters( - self, - timeout: Optional[int] = 60, - cluster_health_chunk_query_description: Optional["_models.ClusterHealthChunkQueryDescription"] = None, - **kwargs - ) -> "_models.ClusterHealthChunk": - """Gets the health of a Service Fabric cluster using health chunks. - - Gets the health of a Service Fabric cluster using health chunks. The health evaluation is done - based on the input cluster health chunk query description. - The query description allows users to specify health policies for evaluating the cluster and - its children. - Users can specify very flexible filters to select which cluster entities to return. The - selection can be done based on the entities health state and based on the hierarchy. - The query can return multi-level children of the entities based on the specified filters. For - example, it can return one application with a specified name, and for this application, return - only services that are in Error or Warning, and all partitions and replicas for one of these - services. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param cluster_health_chunk_query_description: Describes the cluster and application health - policies used to evaluate the cluster health and the filters to select which cluster entities - to be returned. - If the cluster health policy is present, it is used to evaluate the cluster events and the - cluster nodes. If not present, the health evaluation uses the cluster health policy defined in - the cluster manifest or the default cluster health policy. - By default, each application is evaluated using its specific application health policy, - defined in the application manifest, or the default health policy, if no policy is defined in - manifest. - If the application health policy map is specified, and it has an entry for an application, the - specified application health policy - is used to evaluate the application health. - Users can specify very flexible filters to select which cluster entities to include in - response. The selection can be done based on the entities health state and based on the - hierarchy. - The query can return multi-level children of the entities based on the specified filters. For - example, it can return one application with a specified name, and for this application, return - only services that are in Error or Warning, and all partitions and replicas for one of these - services. 
- :type cluster_health_chunk_query_description: ~azure.servicefabric.models.ClusterHealthChunkQueryDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealthChunk, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealthChunk - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if cluster_health_chunk_query_description is not None: - body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore - - async def report_cluster_health( - self, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric cluster. - - Sends a health report on a Service Fabric cluster. The report must contain the information - about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway node, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetClusterHealth and check that - the report appears in the HealthEvents section. - - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. 
- If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_cluster_health.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if 
cls: - return cls(pipeline_response, None, {}) - - report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} # type: ignore - - async def get_provisioned_fabric_code_version_info_list( - self, - code_version: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.FabricCodeVersionInfo"]: - """Gets a list of fabric code versions that are provisioned in a Service Fabric cluster. - - Gets a list of information about fabric code versions that are provisioned in the cluster. The - parameter CodeVersion can be used to optionally filter the output to only that particular - version. - - :param code_version: The product version of Service Fabric. - :type code_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of FabricCodeVersionInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricCodeVersionInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if code_version is not None: - query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str') - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[FabricCodeVersionInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} # type: ignore - - async def get_provisioned_fabric_config_version_info_list( - self, - config_version: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.FabricConfigVersionInfo"]: - """Gets a list of fabric config versions that are provisioned in a Service Fabric cluster. - - Gets a list of information about fabric config versions that are provisioned in the cluster. - The parameter ConfigVersion can be used to optionally filter the output to only that particular - version. - - :param config_version: The config version of Service Fabric. - :type config_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of FabricConfigVersionInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricConfigVersionInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if config_version is not None: - query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[FabricConfigVersionInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} # type: ignore - - async def get_cluster_upgrade_progress( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ClusterUpgradeProgressObject": - """Gets the progress of the current cluster upgrade. - - Gets the current progress of the ongoing cluster upgrade. If no upgrade is currently in - progress, get the last state of the previous cluster upgrade. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterUpgradeProgressObject, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterUpgradeProgressObject"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_upgrade_progress.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
    async def get_cluster_configuration(
        self,
        configuration_api_version: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ClusterConfiguration":
        """Get the Service Fabric standalone cluster configuration.

        The cluster configuration contains properties of the cluster that include different node types
        on the cluster,
        security configurations, fault, and upgrade domain topologies, etc.

        :param configuration_api_version: The API version of the Standalone cluster json configuration.
        :type configuration_api_version: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterConfiguration, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterConfiguration"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_configuration.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterConfiguration', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'}  # type: ignore

    async def get_cluster_configuration_upgrade_status(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ClusterConfigurationUpgradeStatusInfo":
        """Get the cluster configuration upgrade status of a Service Fabric standalone cluster.

        Get the cluster configuration upgrade status details of a Service Fabric standalone cluster.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterConfigurationUpgradeStatusInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterConfigurationUpgradeStatusInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_configuration_upgrade_status.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'}  # type: ignore

    async def get_upgrade_orchestration_service_state(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.UpgradeOrchestrationServiceState":
        """Get the service state of Service Fabric Upgrade Orchestration Service.

        Get the service state of Service Fabric Upgrade Orchestration Service. This API is internally
        used for support purposes.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: UpgradeOrchestrationServiceState, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UpgradeOrchestrationServiceState"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_upgrade_orchestration_service_state.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('UpgradeOrchestrationServiceState', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'}  # type: ignore

    async def set_upgrade_orchestration_service_state(
        self,
        timeout: Optional[int] = 60,
        service_state: Optional[str] = None,
        **kwargs
    ) -> "_models.UpgradeOrchestrationServiceStateSummary":
        """Update the service state of Service Fabric Upgrade Orchestration Service.

        Update the service state of Service Fabric Upgrade Orchestration Service. This API is
        internally used for support purposes.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param service_state: The state of Service Fabric Upgrade Orchestration Service.
        :type service_state: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: UpgradeOrchestrationServiceStateSummary, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UpgradeOrchestrationServiceStateSummary"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flat `service_state` parameter is wrapped into the request model before serialization.
        _upgrade_orchestration_service_state = _models.UpgradeOrchestrationServiceState(service_state=service_state)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.set_upgrade_orchestration_service_state.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'}  # type: ignore

    async def provision_cluster(
        self,
        timeout: Optional[int] = 60,
        code_file_path: Optional[str] = None,
        cluster_manifest_file_path: Optional[str] = None,
        **kwargs
    ) -> None:
        """Provision the code or configuration packages of a Service Fabric cluster.

        Validate and provision the code or configuration packages of a Service Fabric cluster.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param code_file_path: The cluster code package file path.
        :type code_file_path: str
        :param cluster_manifest_file_path: The cluster manifest file path.
        :type cluster_manifest_file_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flat parameters are wrapped into the request model before serialization.
        _provision_fabric_description = _models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.provision_cluster.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_provision_fabric_description, 'ProvisionFabricDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    provision_cluster.metadata = {'url': '/$/Provision'}  # type: ignore

    async def unprovision_cluster(
        self,
        timeout: Optional[int] = 60,
        code_version: Optional[str] = None,
        config_version: Optional[str] = None,
        **kwargs
    ) -> None:
        """Unprovision the code or configuration packages of a Service Fabric cluster.

        It is supported to unprovision code and configuration separately.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param code_version: The cluster code package version.
        :type code_version: str
        :param config_version: The cluster manifest version.
        :type config_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flat parameters are wrapped into the request model before serialization.
        _unprovision_fabric_description = _models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.unprovision_cluster.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_unprovision_fabric_description, 'UnprovisionFabricDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    unprovision_cluster.metadata = {'url': '/$/Unprovision'}  # type: ignore

    async def rollback_cluster_upgrade(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Roll back the upgrade of a Service Fabric cluster.

        Roll back the code or configuration upgrade of a Service Fabric cluster.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.rollback_cluster_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Rollback is accepted asynchronously by the cluster, hence 202 rather than 200.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'}  # type: ignore

    async def resume_cluster_upgrade(
        self,
        upgrade_domain: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Make the cluster upgrade move on to the next upgrade domain.

        Make the cluster code or configuration upgrade move on to the next upgrade domain if
        appropriate.

        :param upgrade_domain: The next upgrade domain for this cluster upgrade.
        :type upgrade_domain: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flat `upgrade_domain` parameter is wrapped into the request model before serialization.
        _resume_cluster_upgrade_description = _models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.resume_cluster_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'}  # type: ignore

    async def start_cluster_upgrade(
        self,
        start_cluster_upgrade_description: "_models.StartClusterUpgradeDescription",
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Start upgrading the code or configuration version of a Service Fabric cluster.

        Validate the supplied upgrade parameters and start upgrading the code or configuration version
        of a Service Fabric cluster if the parameters are valid.

        :param start_cluster_upgrade_description: Describes the parameters for starting a cluster
         upgrade.
        :type start_cluster_upgrade_description: ~azure.servicefabric.models.StartClusterUpgradeDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.start_cluster_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Upgrade start is accepted asynchronously by the cluster, hence 202 rather than 200.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    start_cluster_upgrade.metadata = {'url': '/$/Upgrade'}  # type: ignore

    async def start_cluster_configuration_upgrade(
        self,
        cluster_configuration_upgrade_description: "_models.ClusterConfigurationUpgradeDescription",
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Start upgrading the configuration of a Service Fabric standalone cluster.

        Validate the supplied configuration upgrade parameters and start upgrading the cluster
        configuration if the parameters are valid.

        :param cluster_configuration_upgrade_description: Parameters for a standalone cluster
         configuration upgrade.
        :type cluster_configuration_upgrade_description: ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.start_cluster_configuration_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Upgrade start is accepted asynchronously by the cluster, hence 202 rather than 200.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'}  # type: ignore

    async def update_cluster_upgrade(
        self,
        update_cluster_upgrade_description: "_models.UpdateClusterUpgradeDescription",
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Update the upgrade parameters of a Service Fabric cluster upgrade.

        Update the upgrade parameters used during a Service Fabric cluster upgrade.

        :param update_cluster_upgrade_description: Parameters for updating a cluster upgrade.
        :type update_cluster_upgrade_description: ~azure.servicefabric.models.UpdateClusterUpgradeDescription
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_cluster_upgrade.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    update_cluster_upgrade.metadata = {'url': '/$/UpdateUpgrade'}  # type: ignore

    async def get_aad_metadata(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.AadMetadataObject":
        """Gets the Azure Active Directory metadata used for secured connection to cluster.

        Gets the Azure Active Directory metadata used for secured connection to cluster.
        This API is not supposed to be called separately. It provides information needed to set up an
        Azure Active Directory secured connection with a Service Fabric cluster.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AadMetadataObject, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.AadMetadataObject
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AadMetadataObject"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_aad_metadata.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('AadMetadataObject', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'}  # type: ignore

    async def get_cluster_version(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ClusterVersion":
        """Get the current Service Fabric cluster version.

        If a cluster upgrade is happening, then this API will return the lowest (older) version of the
        current and target cluster runtime versions.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterVersion, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterVersion
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterVersion"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_version.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterVersion', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_version.metadata = {'url': '/$/GetClusterVersion'}  # type: ignore

    async def get_cluster_load(
        self,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.ClusterLoadInfo":
        """Gets the load of a Service Fabric cluster.

        Retrieves the load information of a Service Fabric cluster for all the metrics that have load
        or capacity defined.

        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClusterLoadInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ClusterLoadInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterLoadInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_cluster_load.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ClusterLoadInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_cluster_load.metadata = {'url': '/$/GetLoadInformation'}  # type: ignore
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.toggle_verbose_service_placement_health_reporting.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Enabled'] = self._serialize.query("enabled", enabled, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - toggle_verbose_service_placement_health_reporting.metadata = {'url': '/$/ToggleVerboseServicePlacementHealthReporting'} # type: ignore - - async def get_node_info_list( - self, - continuation_token_parameter: Optional[str] = None, - node_status_filter: Optional[Union[str, "_models.NodeStatusFilter"]] = "default", - 
max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedNodeInfoList": - """Gets the list of nodes in the Service Fabric cluster. - - The response includes the name, status, ID, health, uptime, and other details about the nodes. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param node_status_filter: Allows filtering the nodes based on the NodeStatus. Only the nodes - that are matching the specified filter value will be returned. The filter value can be one of - the following. - :type node_status_filter: str or ~azure.servicefabric.models.NodeStatusFilter - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedNodeInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedNodeInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedNodeInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if node_status_filter is not None: - query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str') - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedNodeInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_info_list.metadata = {'url': '/Nodes'} # type: ignore - - async def get_node_info( - self, - node_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.NodeInfo"]: - """Gets the information about a specific node in the Service Fabric cluster. - - The response includes the name, status, ID, health, uptime, and other details about the node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NodeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_info.metadata = {'url': '/Nodes/{nodeName}'} # type: ignore - - async def get_node_health( - self, - node_name: str, - events_health_state_filter: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.NodeHealth": - """Gets the health of a Service Fabric node. - - Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection - of health events reported on the node based on the health state. If the node that you specify - by name does not exist in the health store, this returns an error. - - :param node_name: The name of the node. - :type node_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. 
The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeHealth', pipeline_response) - - if cls: - return 
cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} # type: ignore - - async def get_node_health_using_policy( - self, - node_name: str, - events_health_state_filter: Optional[int] = 0, - timeout: Optional[int] = 60, - cluster_health_policy: Optional["_models.ClusterHealthPolicy"] = None, - **kwargs - ) -> "_models.NodeHealth": - """Gets the health of a Service Fabric node, by using the specified health policy. - - Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection - of health events reported on the node based on the health state. Use ClusterHealthPolicy in the - POST body to override the health policies used to evaluate the health. If the node that you - specify by name does not exist in the health store, this returns an error. - - :param node_name: The name of the node. - :type node_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. 
- * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param cluster_health_policy: Describes the health policies used to evaluate the health of a - cluster or node. If not present, the health evaluation uses the health policy from cluster - manifest or the default health policy. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_node_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is 
not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if cluster_health_policy is not None: - body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} # type: ignore - - async def report_node_health( - self, - node_name: str, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric node. - - Reports health state of the specified Service Fabric node. The report must contain the - information about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway node, which forwards to the health store. 
- The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetNodeHealth and check that the - report appears in the HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_node_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'} # type: ignore - - async def get_node_load_info( - self, - node_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.NodeLoadInfo": - """Gets the load information of a Service Fabric node. - - Retrieves the load information of a Service Fabric node for all the metrics that have load or - capacity defined. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeLoadInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeLoadInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeLoadInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_load_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeLoadInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} # type: ignore - - async def disable_node( - self, - node_name: str, - timeout: Optional[int] = 60, - deactivation_intent: Optional[Union[str, "_models.DeactivationIntent"]] = None, - **kwargs - ) -> None: - """Deactivate a Service Fabric cluster node with the specified deactivation intent. - - Deactivate a Service Fabric cluster node with the specified deactivation intent. Once the - deactivation is in progress, the deactivation intent can be increased, but not decreased (for - example, a node that is deactivated with the Pause intent can be deactivated further with - Restart, but not the other way around. Nodes may be reactivated using the Activate a node - operation any time after they are deactivated. If the deactivation is not complete, this will - cancel the deactivation. A node that goes down and comes back up while deactivated will still - need to be reactivated before services will be placed on that node. - - :param node_name: The name of the node. 
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param deactivation_intent: Describes the intent or reason for deactivating the node. The
         possible values are following.
        :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/404/409 statuses to azure-core exceptions; a caller-supplied
        # 'error_map' overrides these defaults via dict.update.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flat 'deactivation_intent' parameter is wrapped into the body model expected by the service.
        _deactivation_intent_description = _models.DeactivationIntentDescription(deactivation_intent=deactivation_intent)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.disable_node.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_deactivation_intent_description, 'DeactivationIntentDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Non-200 payloads carry Service Fabric error details; best-effort deserialize.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'}  # type: ignore

    async def enable_node(
        self,
        node_name: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Activate a Service Fabric cluster node that is currently deactivated.

        Activates a Service Fabric cluster node that is currently deactivated. Once activated, the
        node will again become a viable target for placing new replicas, and any deactivated
        replicas remaining on the node will be reactivated.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.enable_node.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Body-less POST: activation carries no payload, only the node path segment.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    enable_node.metadata = {'url': '/Nodes/{nodeName}/$/Activate'}  # type: ignore

    async def remove_node_state(
        self,
        node_name: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Notifies Service Fabric that the persisted state on a node has been permanently removed or
        lost.

        This implies that it is not possible to recover the persisted state of that node. This
        generally happens if a hard disk has been wiped clean, or if a hard disk crashes. The node
        has to be down for this operation to be successful. This operation lets Service Fabric know
        that the replicas on that node no longer exist, and that Service Fabric should stop waiting
        for those replicas to come back up. Do not run this cmdlet if the state on the node has not
        been removed and the node can come back up with its state intact. Starting from Service
        Fabric 6.5, in order to use this API for seed nodes, please change the seed nodes to regular
        (non-seed) nodes and then invoke this API to remove the node state. If the cluster is running
        on Azure, after the seed node goes down, Service Fabric will try to change it to a non-seed
        node automatically. To make this happen, make sure the number of non-seed nodes in the
        primary node type is no less than the number of Down seed nodes. If necessary, add more
        nodes to the primary node type to achieve this. For standalone cluster, if the Down seed
        node is not expected to come back up with its state intact, please remove the node from the
        cluster, see
        https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-windows-server-add-remove-nodes.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.remove_node_state.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'}  # type: ignore

    async def restart_node(
        self,
        node_name: str,
        timeout: Optional[int] = 60,
        node_instance_id: str = "0",
        create_fabric_dump: Optional[Union[str, "_models.CreateFabricDump"]] = "False",
        **kwargs
    ) -> None:
        """Restarts a Service Fabric cluster node.

        Restarts a Service Fabric cluster node that is already started.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param node_instance_id: The instance ID of the target node. If instance ID is specified the
         node is restarted only if it matches with the current instance of the node. A default value
         of "0" would match any instance ID. The instance ID can be obtained using get node query.
        :type node_instance_id: str
        :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is
         case-sensitive.
        :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flat parameters are folded into the RestartNodeDescription request body.
        _restart_node_description = _models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.restart_node.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_restart_node_description, 'RestartNodeDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'}  # type: ignore

    async def remove_configuration_overrides(
        self,
        node_name: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Removes configuration overrides on the specified node.

        This api allows removing all existing configuration overrides on specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.remove_configuration_overrides.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Note: this operation uses HTTP DELETE, unlike the POST-based node operations above.
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    remove_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/RemoveConfigurationOverrides'}  # type: ignore

    async def get_configuration_overrides(
        self,
        node_name: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> List["_models.ConfigParameterOverride"]:
        """Gets the list of configuration overrides on the specified node.

        This api allows getting all existing configuration overrides on the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ConfigParameterOverride, or the result of cls(response)
        :rtype: list[~azure.servicefabric.models.ConfigParameterOverride]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ConfigParameterOverride"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_configuration_overrides.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # '[ConfigParameterOverride]' is msrest syntax for "list of ConfigParameterOverride".
        deserialized = self._deserialize('[ConfigParameterOverride]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/GetConfigurationOverrides'}  # type: ignore

    async def add_configuration_parameter_overrides(
        self,
        node_name: str,
        config_parameter_override_list: List["_models.ConfigParameterOverride"],
        force: Optional[bool] = None,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Adds the list of configuration overrides on the specified node.

        This api allows adding all existing configuration overrides on the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param config_parameter_override_list: Description for adding list of configuration
         overrides.
        :type config_parameter_override_list:
         list[~azure.servicefabric.models.ConfigParameterOverride]
        :param force: Force adding configuration overrides on specified nodes.
        :type force: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add_configuration_parameter_overrides.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if force is not None:
            query_parameters['Force'] = self._serialize.query("force", force, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(config_parameter_override_list, '[ConfigParameterOverride]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'}  # type: ignore

    async def remove_node_tags(
        self,
        node_name: str,
        node_tags: List[str],
        **kwargs
    ) -> None:
        """Removes the list of tags from the specified node.

        This api allows removing set of tags from the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param node_tags: Description for adding list of node tags.
        :type node_tags: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.remove_node_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # NOTE: unlike most node operations this API takes no 'timeout' query parameter.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(node_tags, '[str]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    remove_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeTags'}  # type: ignore

    async def add_node_tags(
        self,
        node_name: str,
        node_tags: List[str],
        **kwargs
    ) -> None:
        """Adds the list of tags on the specified node.

        This api allows adding tags to the specified node.

        :param node_name: The name of the node.
        :type node_name: str
        :param node_tags: Description for adding list of node tags.
        :type node_tags: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add_node_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # NOTE: like remove_node_tags, this API takes no 'timeout' query parameter.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(node_tags, '[str]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    add_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/AddNodeTags'}  # type: ignore
    async def get_application_type_info_list(
        self,
        application_type_definition_kind_filter: Optional[int] = 0,
        exclude_application_parameters: Optional[bool] = False,
        continuation_token_parameter: Optional[str] = None,
        max_results: Optional[int] = 0,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.PagedApplicationTypeInfoList":
        """Gets the list of application types in the Service Fabric cluster.

        Returns the information about the application types that are provisioned or in the process
        of being provisioned in the Service Fabric cluster. Each version of an application type is
        returned as one application type. The response includes the name, version, status, and other
        details about the application type. This is a paged query, meaning that if not all of the
        application types fit in a page, one page of results is returned as well as a continuation
        token, which can be used to get the next page. For example, if there are 10 application
        types but a page only fits the first three application types, or if max results is set to 3,
        then three is returned. To access the rest of the results, retrieve subsequent pages by
        using the returned continuation token in the next query. An empty continuation token is
        returned if there are no subsequent pages.

        :param application_type_definition_kind_filter: Used to filter on
         ApplicationTypeDefinitionKind which is the mechanism used to define a Service Fabric
         application type.


         * Default - Default value, which performs the same function as selecting "All". The value is
         0.
         * All - Filter that matches input with any ApplicationTypeDefinitionKind value. The value is
         65535.
         * ServiceFabricApplicationPackage - Filter that matches input with
         ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage. The value is 1.
         * Compose - Filter that matches input with ApplicationTypeDefinitionKind value Compose. The
         value is 2.
        :type application_type_definition_kind_filter: int
        :param exclude_application_parameters: The flag that specifies whether application parameters
         will be excluded from the result.
        :type exclude_application_parameters: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of
         the API when the results from the system do not fit in a single response. When this value is
         passed to the next API call, the API returns next set of results. If there are no further
         results, then the continuation token does not contain a value. The value of this parameter
         should not be URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged
         queries. This parameter defines the upper bound on the number of results returned. The
         results returned can be less than the specified maximum results if they do not fit in the
         message as per the max message size restrictions defined in the configuration. If this
         parameter is zero or not specified, the paged query includes as many results as possible
         that fit in the return message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedApplicationTypeInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedApplicationTypeInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL (no path parameters for the cluster-wide listing).
        url = self.get_application_type_info_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if application_type_definition_kind_filter is not None:
            query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int')
        if exclude_application_parameters is not None:
            query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
        if continuation_token_parameter is not None:
            # skip_quote: the service expects the continuation token un-encoded.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_type_info_list.metadata = {'url': '/ApplicationTypes'}  # type: ignore

    async def get_application_type_info_list_by_name(
        self,
        application_type_name: str,
        application_type_version: Optional[str] = None,
        exclude_application_parameters: Optional[bool] = False,
        continuation_token_parameter: Optional[str] = None,
        max_results: Optional[int] = 0,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.PagedApplicationTypeInfoList":
        """Gets the list of application types in the Service Fabric cluster matching exactly the
        specified name.

        Returns the information about the application types that are provisioned or in the process
        of being provisioned in the Service Fabric cluster. These results are of application types
        whose name match exactly the one specified as the parameter, and which comply with the given
        query parameters. All versions of the application type matching the application type name
        are returned, with each version returned as one application type. The response includes the
        name, version, status, and other details about the application type. This is a paged query,
        meaning that if not all of the application types fit in a page, one page of results is
        returned as well as a continuation token, which can be used to get the next page. For
        example, if there are 10 application types but a page only fits the first three application
        types, or if max results is set to 3, then three is returned. To access the rest of the
        results, retrieve subsequent pages by using the returned continuation token in the next
        query. An empty continuation token is returned if there are no subsequent pages.

        :param application_type_name: The name of the application type.
        :type application_type_name: str
        :param application_type_version: The version of the application type.
        :type application_type_version: str
        :param exclude_application_parameters: The flag that specifies whether application parameters
         will be excluded from the result.
        :type exclude_application_parameters: bool
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of
         the API when the results from the system do not fit in a single response. When this value is
         passed to the next API call, the API returns next set of results. If there are no further
         results, then the continuation token does not contain a value. The value of this parameter
         should not be URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged
         queries. This parameter defines the upper bound on the number of results returned. The
         results returned can be less than the specified maximum results if they do not fit in the
         message as per the max message size restrictions defined in the configuration. If this
         parameter is zero or not specified, the paged query includes as many results as possible
         that fit in the return message.
        :type max_results: long
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedApplicationTypeInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedApplicationTypeInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_application_type_info_list_by_name.metadata['url']  # type: ignore
        path_format_arguments = {
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if application_type_version is not None:
            query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str')
        if exclude_application_parameters is not None:
            query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool')
        if continuation_token_parameter is not None:
            # skip_quote: the service expects the continuation token un-encoded.
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'}  # type: ignore

    async def provision_application_type(
        self,
        provision_application_type_description_base_required_body_param: "_models.ProvisionApplicationTypeDescriptionBase",
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Provisions or registers a Service Fabric application type with the cluster using the
        '.sfpkg' package in the external store or using the application package in the image store.

        Provisions a Service Fabric application type with the cluster. The provision is required
        before any new applications can be instantiated.
        The provision operation can be performed either on the application package specified by the
        relativePathInImageStore, or by using the URI of the external '.sfpkg'.

        :param provision_application_type_description_base_required_body_param: The base type of
         provision application type description which supports either image store-based provision or
         external store-based provision.
        :type provision_application_type_description_base_required_body_param:
         ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.provision_application_type.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # Polymorphic base type: msrest picks the concrete provision description from 'Kind'.
        body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'} # type: ignore - - async def unprovision_application_type( - self, - application_type_name: str, - application_type_version: str, - timeout: Optional[int] = 60, - async_parameter: Optional[bool] = None, - **kwargs - ) -> None: - """Removes or unregisters a Service Fabric application type from the cluster. - - This operation can only be performed if all application instances of the application type have - been deleted. Once the application type is unregistered, no new application instances can be - created for this particular application type. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type as defined in the - application manifest. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param async_parameter: The flag indicating whether or not unprovision should occur - asynchronously. When set to true, the unprovision operation returns when the request is - accepted by the system, and the unprovision operation continues without any timeout limit. The - default value is false. However, we recommend setting it to true for large application packages - that were provisioned. 
- :type async_parameter: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _unprovision_application_type_description_info = _models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.unprovision_application_type.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'} # type: ignore - - async def get_service_type_info_list( - self, - application_type_name: str, - application_type_version: str, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.ServiceTypeInfo"]: - """Gets the list containing the information about service types that are supported by a provisioned application type in a Service Fabric cluster. - - Gets the list containing the information about service types that are supported by a - provisioned application type in a Service Fabric cluster. The provided application type must - exist. Otherwise, a 404 status is returned. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ServiceTypeInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ServiceTypeInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceTypeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_type_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ServiceTypeInfo]', 
pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'} # type: ignore - - async def get_service_type_info_by_name( - self, - application_type_name: str, - application_type_version: str, - service_type_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.ServiceTypeInfo"]: - """Gets the information about a specific service type that is supported by a provisioned application type in a Service Fabric cluster. - - Gets the information about a specific service type that is supported by a provisioned - application type in a Service Fabric cluster. The provided application type must exist. - Otherwise, a 404 status is returned. A 204 response is returned if the specified service type - is not found in the cluster. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param service_type_name: Specifies the name of a Service Fabric service type. - :type service_type_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceTypeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceTypeInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceTypeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_type_info_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceTypeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore - - async def get_service_manifest( - self, - application_type_name: str, - application_type_version: str, - service_manifest_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ServiceTypeManifest": - """Gets the manifest describing a service type. - - Gets the manifest describing a service type. The response contains the service manifest XML as - a string. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceTypeManifest, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceTypeManifest - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceTypeManifest"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_manifest.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceTypeManifest', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'} # type: ignore - - async def get_deployed_service_type_info_list( - self, - node_name: str, - application_id: str, - service_manifest_name: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.DeployedServiceTypeInfo"]: - """Gets the list containing the information about service types from the applications deployed on a node in a Service Fabric cluster. - - Gets the list containing the information about service types from the applications deployed on - a node in a Service Fabric cluster. The response includes the name of the service type, its - registration status, the code package that registered it and activation ID of the service - package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of the service manifest to filter the list of deployed - service type information. If specified, the response will only contain the information about - service types that are defined in this service manifest. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServiceTypeInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServiceTypeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_type_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'} # type: ignore - - async def get_deployed_service_type_info_by_name( - self, - node_name: str, - application_id: str, - service_type_name: str, - service_manifest_name: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional[List["_models.DeployedServiceTypeInfo"]]: - """Gets the information about a specified service type of the application deployed on a node in a Service Fabric cluster. - - Gets the list containing the information about a specific service type from the applications - deployed on a node in a Service Fabric cluster. The response includes the name of the service - type, its registration status, the code package that registered it and activation ID of the - service package. Each entry represents one activation of a service type, differentiated by the - activation ID. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_type_name: Specifies the name of a Service Fabric service type. - :type service_type_name: str - :param service_manifest_name: The name of the service manifest to filter the list of deployed - service type information. 
If specified, the response will only contain the information about - service types that are defined in this service manifest. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServiceTypeInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceTypeInfo"]]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_type_info_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - 
header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore - - async def create_application( - self, - application_description: "_models.ApplicationDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Creates a Service Fabric application. - - Creates a Service Fabric application using the specified description. - - :param application_description: Description for creating an application. - :type application_description: ~azure.servicefabric.models.ApplicationDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_application.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_description, 'ApplicationDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_application.metadata = {'url': 
'/Applications/$/Create'} # type: ignore - - async def delete_application( - self, - application_id: str, - force_remove: Optional[bool] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes an existing Service Fabric application. - - An application must be created before it can be deleted. Deleting an application will delete - all services that are part of that application. By default, Service Fabric will try to close - service replicas in a graceful manner and then delete the service. However, if a service is - having issues closing the replica gracefully, the delete operation may take a long time or get - stuck. Use the optional ForceRemove flag to skip the graceful close sequence and forcefully - delete the application and all of its services. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param force_remove: Remove a Service Fabric application or service forcefully without going - through the graceful shutdown sequence. This parameter can be used to forcefully delete an - application or service for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_application.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'} # type: ignore - - async def 
get_application_load_info( - self, - application_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.ApplicationLoadInfo"]: - """Gets load information about a Service Fabric application. - - Returns the load information about the application that was created or in the process of being - created in the Service Fabric cluster and whose name matches the one specified as the - parameter. The response includes the name, minimum nodes, maximum nodes, the number of nodes - the application is occupying currently, and application load metric information about the - application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationLoadInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationLoadInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationLoadInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_load_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationLoadInfo', pipeline_response) - - if cls: - return cls(pipeline_response, 
deserialized, {}) - - return deserialized - get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'} # type: ignore - - async def get_application_info_list( - self, - application_definition_kind_filter: Optional[int] = 0, - application_type_name: Optional[str] = None, - exclude_application_parameters: Optional[bool] = False, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedApplicationInfoList": - """Gets the list of applications created in the Service Fabric cluster that match the specified filters. - - Gets the information about the applications that were created or in the process of being - created in the Service Fabric cluster and match the specified filters. The response includes - the name, type, status, parameters, and other details about the application. If the - applications do not fit in a page, one page of results is returned as well as a continuation - token, which can be used to get the next page. Filters ApplicationTypeName and - ApplicationDefinitionKindFilter cannot be specified at the same time. - - :param application_definition_kind_filter: Used to filter on ApplicationDefinitionKind, which - is the mechanism used to define a Service Fabric application. - - - * Default - Default value, which performs the same function as selecting "All". The value is - 0. - * All - Filter that matches input with any ApplicationDefinitionKind value. The value is - 65535. - * ServiceFabricApplicationDescription - Filter that matches input with - ApplicationDefinitionKind value ServiceFabricApplicationDescription. The value is 1. - * Compose - Filter that matches input with ApplicationDefinitionKind value Compose. The value - is 2. - :type application_definition_kind_filter: int - :param application_type_name: The application type name used to filter the applications to - query for. 
This value should not contain the application type version. - :type application_type_name: str - :param exclude_application_parameters: The flag that specifies whether application parameters - will be excluded from the result. - :type exclude_application_parameters: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedApplicationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedApplicationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_definition_kind_filter is not None: - query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int') - if application_type_name is not None: - query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedApplicationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_info_list.metadata = {'url': '/Applications'} # type: ignore - - async def get_application_info( - self, - application_id: str, - exclude_application_parameters: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.ApplicationInfo"]: - """Gets information about a Service Fabric application. - - Returns the information about the application that was created or in the process of being - created in the Service Fabric cluster and whose name matches the one specified as the - parameter. The response includes the name, type, status, parameters, and other details about - the application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param exclude_application_parameters: The flag that specifies whether application parameters - will be excluded from the result. 
- :type exclude_application_parameters: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_info.metadata = {'url': '/Applications/{applicationId}'} # type: ignore - - async def get_application_health( - self, - application_id: str, - events_health_state_filter: Optional[int] = 0, - deployed_applications_health_state_filter: Optional[int] = 0, - services_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ApplicationHealth": - """Gets the health of the service fabric application. - - Returns the heath state of the service fabric application. The response reports either Ok, - Error or Warning health state. If the entity is not found in the health store, it will return - Error. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. 
- If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param deployed_applications_health_state_filter: Allows filtering of the deployed applications - health state objects returned in the result of application health query based on their health - state. - The possible values for this parameter include integer value of one of the following health - states. Only deployed applications that match the filter will be returned. - All deployed applications are used to evaluate the aggregated health state. If not specified, - all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values, obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of deployed applications with - HealthState value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. 
The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_applications_health_state_filter: int - :param services_health_state_filter: Allows filtering of the services health state objects - returned in the result of services health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only services that match the filter are returned. All services are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of services with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type services_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_applications_health_state_filter is not None: - query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') - if services_health_state_filter is not None: - query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not 
None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore - - async def get_application_health_using_policy( - self, - application_id: str, - events_health_state_filter: Optional[int] = 0, - deployed_applications_health_state_filter: Optional[int] = 0, - services_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, - **kwargs - ) -> "_models.ApplicationHealth": - """Gets the health of a Service Fabric application using the specified policy. - - Gets the health of a Service Fabric application. Use EventsHealthStateFilter to filter the - collection of health events reported on the node based on the health state. Use - ClusterHealthPolicies to override the health policies used to evaluate the health. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. 
- Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param deployed_applications_health_state_filter: Allows filtering of the deployed applications - health state objects returned in the result of application health query based on their health - state. - The possible values for this parameter include integer value of one of the following health - states. Only deployed applications that match the filter will be returned. - All deployed applications are used to evaluate the aggregated health state. 
If not specified, - all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values, obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of deployed applications with - HealthState value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_applications_health_state_filter: int - :param services_health_state_filter: Allows filtering of the services health state objects - returned in the result of services health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only services that match the filter are returned. All services are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of services with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
- * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type services_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. 
- :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_application_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_applications_health_state_filter is not None: - query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') - if services_health_state_filter is not None: - query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore - - async def report_application_health( - self, - application_id: str, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric application. - - Reports health state of the specified Service Fabric application. The report must contain the - information about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway Application, which forwards to the health store. 
- The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, get application health and check - that the report appears in the HealthEvents section. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. 
- This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') 
- - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} # type: ignore - - async def start_application_upgrade( - self, - application_id: str, - application_upgrade_description: "_models.ApplicationUpgradeDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Starts upgrading an application in the Service Fabric cluster. - - Validates the supplied application upgrade parameters and starts upgrading the application if - the parameters are valid. - Note, `ApplicationParameter - `_\ - s are not preserved across an application upgrade. - In order to preserve current application parameters, the user should get the parameters using - `GetApplicationInfo <./GetApplicationInfo.md>`_ operation first and pass them into the upgrade - API call as shown in the example. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
- :type application_id: str - :param application_upgrade_description: Parameters for an application upgrade. - :type application_upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_upgrade_description, 
'ApplicationUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} # type: ignore - - async def get_application_upgrade( - self, - application_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ApplicationUpgradeProgressInfo": - """Gets details for the latest upgrade performed on this application. - - Returns information about the state of the latest application upgrade along with details to aid - debugging application health issues. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationUpgradeProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationUpgradeProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationUpgradeProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} # type: ignore - - async def update_application_upgrade( - self, - application_id: str, - application_upgrade_update_description: "_models.ApplicationUpgradeUpdateDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Updates an ongoing application upgrade in the Service Fabric cluster. - - Updates the parameters of an ongoing application upgrade from the ones specified at the time of - starting the application upgrade. This may be required to mitigate stuck application upgrades - due to incorrect parameters or issues in the application to make progress. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param application_upgrade_update_description: Parameters for updating an existing application - upgrade. - :type application_upgrade_update_description: ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} # type: ignore - - async def resume_application_upgrade( - self, - application_id: str, - upgrade_domain_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Resumes upgrading an application in the Service Fabric cluster. - - Resumes an unmonitored manual Service Fabric application upgrade. Service Fabric upgrades one - upgrade domain at a time. For unmonitored manual upgrades, after Service Fabric finishes an - upgrade domain, it waits for you to call this API before proceeding to the next upgrade domain. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param upgrade_domain_name: The name of the upgrade domain in which to resume the upgrade. - :type upgrade_domain_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _resume_application_upgrade_description = _models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.resume_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} # type: ignore - - async def rollback_application_upgrade( - self, - application_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Starts rolling back the currently on-going upgrade of an application in the Service Fabric cluster. - - Starts rolling back the current application upgrade to the previous version. This API can only - be used to roll back the current in-progress upgrade that is rolling forward to new version. If - the application is not currently being upgraded use StartApplicationUpgrade API to upgrade it - to desired version, including rolling back to a previous version. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.rollback_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} # type: ignore - - async def get_deployed_application_info_list( - self, - node_name: str, - timeout: Optional[int] = 60, - 
include_health_state: Optional[bool] = False, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - **kwargs - ) -> "_models.PagedDeployedApplicationInfoList": - """Gets the list of applications deployed on a Service Fabric node. - - Gets the list of applications deployed on a Service Fabric node. The results do not include - information about deployed system applications unless explicitly queried for by ID. Results - encompass deployed applications in active, activating, and downloading states. This query - requires that the node name corresponds to a node on the cluster. The query fails if the - provided node name does not point to any active Service Fabric nodes on the cluster. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param include_health_state: Include the health state of an entity. - If this parameter is false or not specified, then the health state returned is "Unknown". - When set to true, the query goes in parallel to the node and the health system service before - the results are merged. - As a result, the query is more expensive and may take a longer time. - :type include_health_state: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedDeployedApplicationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedDeployedApplicationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if include_health_state is not None: - query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedDeployedApplicationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} # type: ignore - - async def get_deployed_application_info( - self, - node_name: str, - application_id: str, - timeout: Optional[int] = 60, - include_health_state: Optional[bool] = False, - **kwargs - ) -> Optional["_models.DeployedApplicationInfo"]: - """Gets the information about an application deployed on a Service Fabric node. - - This query returns system application information if the application ID provided is for system - application. Results encompass deployed applications in active, activating, and downloading - states. This query requires that the node name corresponds to a node on the cluster. The query - fails if the provided node name does not point to any active Service Fabric nodes on the - cluster. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. 
This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param include_health_state: Include the health state of an entity. - If this parameter is false or not specified, then the health state returned is "Unknown". - When set to true, the query goes in parallel to the node and the health system service before - the results are merged. - As a result, the query is more expensive and may take a longer time. - :type include_health_state: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedApplicationInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedApplicationInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DeployedApplicationInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - 
query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if include_health_state is not None: - query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedApplicationInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} # type: ignore - - async def get_deployed_application_health( - self, - node_name: str, - application_id: str, - events_health_state_filter: Optional[int] = 0, - deployed_service_packages_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.DeployedApplicationHealth": - """Gets the information about health of an application deployed on a Service Fabric node. - - Gets the information about health of an application deployed on a Service Fabric node. 
Use - EventsHealthStateFilter to optionally filter for the collection of HealthEvent objects reported - on the deployed application based on health state. Use DeployedServicePackagesHealthStateFilter - to optionally filter for DeployedServicePackageHealth children based on health state. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
- :type events_health_state_filter: int - :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service - package health state objects returned in the result of deployed application health query based - on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only deployed service packages that match the filter are returned. All deployed service - packages are used to evaluate the aggregated health state of the deployed application. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value can be a combination of these - values, obtained using the bitwise 'OR' operator. - For example, if the provided value is 6 then health state of service packages with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_service_packages_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_service_packages_health_state_filter is not None: - query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore - - async def get_deployed_application_health_using_policy( - self, - node_name: str, - application_id: str, - events_health_state_filter: Optional[int] = 0, - deployed_service_packages_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, - **kwargs - ) -> "_models.DeployedApplicationHealth": - """Gets the information about health of an application deployed on a Service Fabric node. using the specified policy. - - Gets the information about health of an application deployed on a Service Fabric node using the - specified policy. Use EventsHealthStateFilter to optionally filter for the collection of - HealthEvent objects reported on the deployed application based on health state. Use - DeployedServicePackagesHealthStateFilter to optionally filter for DeployedServicePackageHealth - children based on health state. Use ApplicationHealthPolicy to optionally override the health - policies used to evaluate the health. 
This API only uses 'ConsiderWarningAsError' field of the - ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the - deployed application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
- :type events_health_state_filter: int - :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service - package health state objects returned in the result of deployed application health query based - on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only deployed service packages that match the filter are returned. All deployed service - packages are used to evaluate the aggregated health state of the deployed application. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value can be a combination of these - values, obtained using the bitwise 'OR' operator. - For example, if the provided value is 6 then health state of service packages with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_service_packages_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_service_packages_health_state_filter is not None: - query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", 
deployed_service_packages_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore - - async def report_deployed_application_health( - self, - node_name: str, - application_id: str, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric 
application deployed on a Service Fabric node. - - Reports health state of the application deployed on a Service Fabric node. The report must - contain the information about the source of the health report and property on which it is - reported. - The report is sent to a Service Fabric gateway Service, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, get deployed application health and - check that the report appears in the HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. 
- Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_deployed_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = 
self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} # type: ignore - - async def get_application_manifest( - self, - application_type_name: str, - application_type_version: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ApplicationTypeManifest": - """Gets the manifest describing an application type. - - The response contains the application manifest XML as a string. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationTypeManifest, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationTypeManifest - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationTypeManifest"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_manifest.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationTypeManifest', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} # type: ignore - - async def get_service_info_list( - self, - application_id: str, - service_type_name: Optional[str] = None, - continuation_token_parameter: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedServiceInfoList": - """Gets the information about all services belonging to the application specified by the application ID. - - Returns the information about all services belonging to the application specified by the - application ID. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_type_name: The service type name used to filter the services to query for. - :type service_type_name: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServiceInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedServiceInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if service_type_name is not None: - query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, 
query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedServiceInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} # type: ignore - - async def get_service_info( - self, - application_id: str, - service_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.ServiceInfo"]: - """Gets the information about the specific service belonging to the Service Fabric application. - - Returns the information about the specified service belonging to the specified Service Fabric - application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_info.metadata = {'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} # type: ignore - - async def get_application_name_info( - self, - service_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ApplicationNameInfo": - """Gets the name of the Service Fabric application for a service. - - Gets the name of the application for the specified service. A 404 - FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the provided service ID - does not exist. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationNameInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationNameInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationNameInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_name_info.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationNameInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_name_info.metadata = {'url': 
'/Services/{serviceId}/$/GetApplicationName'} # type: ignore - - async def create_service( - self, - application_id: str, - service_description: "_models.ServiceDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Creates the specified Service Fabric service. - - This api allows creating a new Service Fabric stateless or stateful service under a specified - Service Fabric application. The description for creating the service includes partitioning - information and optional properties for placement and load balancing. Some of the properties - can later be modified using ``UpdateService`` API. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_description: The information necessary to create a service. - :type service_description: ~azure.servicefabric.models.ServiceDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_service.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(service_description, 'ServiceDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} # type: ignore - - async def create_service_from_template( - self, - application_id: str, - service_from_template_description: "_models.ServiceFromTemplateDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Creates a Service Fabric service from the service template. - - Creates a Service Fabric service from the service template defined in the application manifest. - A service template contains the properties that will be same for the service instance of the - same type. The API allows overriding the properties that are usually different for different - services of the same service type. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_from_template_description: Describes the service that needs to be created from - the template defined in the application manifest. - :type service_from_template_description: ~azure.servicefabric.models.ServiceFromTemplateDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_service_from_template.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} # type: ignore - - async def delete_service( - self, - service_id: str, - force_remove: Optional[bool] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes an existing Service Fabric service. - - A service must be created before it can be deleted. By default, Service Fabric will try to - close service replicas in a graceful manner and then delete the service. However, if the - service is having issues closing the replica gracefully, the delete operation may take a long - time or get stuck. Use the optional ForceRemove flag to skip the graceful close sequence and - forcefully delete the service. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param force_remove: Remove a Service Fabric application or service forcefully without going - through the graceful shutdown sequence. This parameter can be used to forcefully delete an - application or service for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_service.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} # type: ignore - - async def update_service( - self, - 
service_id: str, - service_update_description: "_models.ServiceUpdateDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Updates a Service Fabric service using the specified update description. - - This API allows updating properties of a running Service Fabric service. The set of properties - that can be updated are a subset of the properties that were specified at the time of creating - the service. The current set of properties can be obtained using ``GetServiceDescription`` API. - Note that updating the properties of a running service is different than upgrading your - application using ``StartApplicationUpgrade`` API. The upgrade is a long running background - operation that involves moving the application from one version to another, one upgrade domain - at a time, whereas update applies the new properties immediately to the service. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param service_update_description: The information necessary to update a service. - :type service_update_description: ~azure.servicefabric.models.ServiceUpdateDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_service.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} # type: ignore - - async def get_service_description( - self, - service_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ServiceDescription": - """Gets the description of an existing Service Fabric service. - - Gets the description of an existing Service Fabric service. A service must be created before - its description can be obtained. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_description.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_description.metadata = {'url': 
'/Services/{serviceId}/$/GetDescription'} # type: ignore - - async def get_service_health( - self, - service_id: str, - events_health_state_filter: Optional[int] = 0, - partitions_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ServiceHealth": - """Gets the health of the specified Service Fabric service. - - Gets the health information of the specified service. - Use EventsHealthStateFilter to filter the collection of health events reported on the service - based on the health state. - Use PartitionsHealthStateFilter to filter the collection of partitions returned. - If you specify a service that does not exist in the health store, this request returns an - error. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. 
Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param partitions_health_state_filter: Allows filtering of the partitions health state objects - returned in the result of service health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only partitions that match the filter are returned. All partitions are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these value - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of partitions with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type partitions_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. 
- The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_health.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if partitions_health_state_filter is not None: - query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore - - async def get_service_health_using_policy( - self, - service_id: str, - events_health_state_filter: Optional[int] = 0, - partitions_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, - **kwargs - ) -> "_models.ServiceHealth": - """Gets the health of the specified Service Fabric service, by using the specified health policy. - - Gets the health information of the specified service. - If the application health policy is specified, the health evaluation uses it to get the - aggregated health state. - If the policy is not specified, the health evaluation uses the application health policy - defined in the application manifest, or the default health policy, if no policy is defined in - the manifest. - Use EventsHealthStateFilter to filter the collection of health events reported on the service - based on the health state. 
- Use PartitionsHealthStateFilter to filter the collection of partitions returned. - If you specify a service that does not exist in the health store, this request returns an - error. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
- :type events_health_state_filter: int - :param partitions_health_state_filter: Allows filtering of the partitions health state objects - returned in the result of service health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only partitions that match the filter are returned. All partitions are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these value - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of partitions with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type partitions_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_service_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if partitions_health_state_filter is not None: - query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore - - async def report_service_health( - self, - service_id: str, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric service. - - Reports health state of the specified Service Fabric service. The report must contain the - information about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway Service, which forwards to the health store. 
- The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetServiceHealth and check that - the report appears in the HealthEvents section. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. 
- This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_service_health.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'} # type: ignore - - async def resolve_service( - self, - service_id: str, - partition_key_type: Optional[int] = None, - partition_key_value: Optional[str] = None, - previous_rsp_version: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ResolvedServicePartition": - """Resolve a Service Fabric partition. - - Resolve a Service Fabric service partition to get the endpoints of the service replicas. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_key_type: Key type for the partition. This parameter is required if the - partition scheme for the service is Int64Range or Named. The possible values are following. - - - * None (1) - Indicates that the PartitionKeyValue parameter is not specified. This is valid - for the partitions with partitioning scheme as Singleton. 
This is the default value. The value - is 1. - * Int64Range (2) - Indicates that the PartitionKeyValue parameter is an int64 partition key. - This is valid for the partitions with partitioning scheme as Int64Range. The value is 2. - * Named (3) - Indicates that the PartitionKeyValue parameter is a name of the partition. This - is valid for the partitions with partitioning scheme as Named. The value is 3. - :type partition_key_type: int - :param partition_key_value: Partition key. This is required if the partition scheme for the - service is Int64Range or Named. - This is not the partition ID, but rather, either the integer key value, or the name of the - partition ID. - For example, if your service is using ranged partitions from 0 to 10, then they - PartitionKeyValue would be an - integer in that range. Query service description to see the range or name. - :type partition_key_value: str - :param previous_rsp_version: The value in the Version field of the response that was received - previously. This is required if the user knows that the result that was gotten previously is - stale. - :type previous_rsp_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ResolvedServicePartition, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ResolvedServicePartition - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ResolvedServicePartition"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.resolve_service.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_key_type is not None: - query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int') - if partition_key_value is not None: - query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True) - if previous_rsp_version is not None: - query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ResolvedServicePartition', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'} # type: ignore - - async def get_unplaced_replica_information( - self, - service_id: str, - partition_id: Optional[str] = None, - only_query_primaries: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.UnplacedReplicaInformation": - """Gets the information about unplaced replica of the service. - - Returns the information about the unplaced replicas of the service. - If PartitionId is specified, then result will contain information only about unplaced replicas - for that partition. - If PartitionId is not specified, then result will contain information about unplaced replicas - for all partitions of that service. - If OnlyQueryPrimaries is set to true, then result will contain information only about primary - replicas, and will ignore unplaced secondary replicas. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param only_query_primaries: Indicates that unplaced replica information will be queries only - for primary replicas. 
- :type only_query_primaries: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UnplacedReplicaInformation, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UnplacedReplicaInformation - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UnplacedReplicaInformation"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_unplaced_replica_information.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_id is not None: - query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') - if only_query_primaries is not None: - query_parameters['OnlyQueryPrimaries'] = self._serialize.query("only_query_primaries", only_query_primaries, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UnplacedReplicaInformation', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_unplaced_replica_information.metadata = {'url': '/Services/{serviceId}/$/GetUnplacedReplicaInformation'} # type: ignore - - async def get_loaded_partition_info_list( - self, - metric_name: str, - service_name: Optional[str] = None, - ordering: Optional[Union[str, "_models.Ordering"]] = None, - max_results: Optional[int] = 0, - continuation_token_parameter: Optional[str] = None, - **kwargs - ) -> "_models.LoadedPartitionInformationResultList": - """Gets ordered list of partitions. - - Retrieves partitions which are most/least loaded according to specified metric. - - :param metric_name: Name of the metric based on which to get ordered list of partitions. - :type metric_name: str - :param service_name: The name of a service. - :type service_name: str - :param ordering: Ordering of partitions' load. - :type ordering: str or ~azure.servicefabric.models.Ordering - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. 
- :type max_results: long - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: LoadedPartitionInformationResultList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.LoadedPartitionInformationResultList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadedPartitionInformationResultList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_loaded_partition_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['MetricName'] = self._serialize.query("metric_name", metric_name, 'str') - if service_name is not None: - query_parameters['ServiceName'] = self._serialize.query("service_name", service_name, 'str') - if ordering is not None: - query_parameters['Ordering'] = self._serialize.query("ordering", ordering, 'str') - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('LoadedPartitionInformationResultList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_loaded_partition_info_list.metadata = {'url': '/$/GetLoadedPartitionInfoList'} # type: ignore - - async def get_partition_info_list( - self, - service_id: str, - continuation_token_parameter: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedServicePartitionInfoList": - """Gets the list of partitions of a Service Fabric service. - - The response includes the partition ID, partitioning scheme information, keys supported by the - partition, status, health, and other details about the partition. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. 
A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServicePartitionInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServicePartitionInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedServicePartitionInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_info_list.metadata = {'url': '/Services/{serviceId}/$/GetPartitions'} # type: ignore - - async def get_partition_info( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.ServicePartitionInfo"]: - """Gets the information about a Service Fabric partition. - - Gets the information about the specified partition. The response includes the partition ID, - partitioning scheme information, keys supported by the partition, status, health, and other - details about the partition. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServicePartitionInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServicePartitionInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServicePartitionInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_info.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServicePartitionInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
{}) - - return deserialized - get_partition_info.metadata = {'url': '/Partitions/{partitionId}'} # type: ignore - - async def get_service_name_info( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ServiceNameInfo": - """Gets the name of the Service Fabric service for a partition. - - Gets name of the service for the specified partition. A 404 error is returned if the partition - ID does not exist in the cluster. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceNameInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceNameInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceNameInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_name_info.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, 
Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceNameInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_name_info.metadata = {'url': '/Partitions/{partitionId}/$/GetServiceName'} # type: ignore - - async def get_partition_health( - self, - partition_id: str, - events_health_state_filter: Optional[int] = 0, - replicas_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PartitionHealth": - """Gets the health of the specified Service Fabric partition. - - Use EventsHealthStateFilter to filter the collection of health events reported on the service - based on the health state. - Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the - partition. - If you specify a partition that does not exist in the health store, this request returns an - error. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. 
The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState - objects on the partition. The value can be obtained from members or bitwise operations on - members of HealthStateFilter. Only replicas that match the filter will be returned. All - replicas will be used to evaluate the aggregated health state. If not specified, all entries - will be returned.The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. For example, If the provided - value is 6 then all of the events with HealthState value of OK (2) and Warning (4) will be - returned. The possible values for this parameter include integer value of one of the following - health states. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
- * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type replicas_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_health.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", 
events_health_state_filter, 'int') - if replicas_health_state_filter is not None: - query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PartitionHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} # type: ignore - - async def get_partition_health_using_policy( - self, - partition_id: str, - events_health_state_filter: Optional[int] = 0, - replicas_health_state_filter: Optional[int] = 0, - exclude_health_statistics: Optional[bool] = False, - timeout: Optional[int] = 60, - application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, - **kwargs - ) -> "_models.PartitionHealth": - """Gets the health of the specified Service Fabric partition, by using the specified health policy. - - Gets the health information of the specified partition. 
- If the application health policy is specified, the health evaluation uses it to get the - aggregated health state. - If the policy is not specified, the health evaluation uses the application health policy - defined in the application manifest, or the default health policy, if no policy is defined in - the manifest. - Use EventsHealthStateFilter to filter the collection of health events reported on the partition - based on the health state. - Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the - partition. Use ApplicationHealthPolicy in the POST body to override the health policies used to - evaluate the health. - If you specify a partition that does not exist in the health store, this request returns an - error. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. 
- * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState - objects on the partition. The value can be obtained from members or bitwise operations on - members of HealthStateFilter. Only replicas that match the filter will be returned. All - replicas will be used to evaluate the aggregated health state. If not specified, all entries - will be returned.The state values are flag-based enumeration, so the value could be a - combination of these values obtained using bitwise 'OR' operator. For example, If the provided - value is 6 then all of the events with HealthState value of OK (2) and Warning (4) will be - returned. The possible values for this parameter include integer value of one of the following - health states. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type replicas_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_partition_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if replicas_health_state_filter is not None: - query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", 
exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PartitionHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} # type: ignore - - async def report_partition_health( - self, - partition_id: str, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric partition. - - Reports health state of the specified Service Fabric partition. The report must contain the - information about the source of the health report and property on which it is reported. 
- The report is sent to a Service Fabric gateway Partition, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetPartitionHealth and check - that the report appears in the HealthEvents section. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_partition_health.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'} # type: ignore - - async def get_partition_load_information( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PartitionLoadInformation": - """Gets the load information of the specified Service Fabric partition. - - Returns information about the load of a specified partition. - The response includes a list of load reports for a Service Fabric partition. - Each report includes the load metric name, value, and last reported time in UTC. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionLoadInformation, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionLoadInformation - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionLoadInformation"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_load_information.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PartitionLoadInformation', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} # type: ignore - - async def reset_partition_load( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Resets the current load of a Service Fabric partition. - - Resets the current load of a Service Fabric partition to the default load for the service. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.reset_partition_load.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, 
query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - reset_partition_load.metadata = {'url': '/Partitions/{partitionId}/$/ResetLoad'} # type: ignore - - async def recover_partition( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Indicates to the Service Fabric cluster that it should attempt to recover a specific partition that is currently stuck in quorum loss. - - This operation should only be performed if it is known that the replicas that are down cannot - be recovered. Incorrect use of this API can cause potential data loss. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_partition.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'} # type: ignore - - async def recover_service_partitions( - self, - service_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Indicates to the Service 
Fabric cluster that it should attempt to recover the specified service that is currently stuck in quorum loss. - - Indicates to the Service Fabric cluster that it should attempt to recover the specified service - that is currently stuck in quorum loss. This operation should only be performed if it is known - that the replicas that are down cannot be recovered. Incorrect use of this API can cause - potential data loss. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_service_partitions.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} # type: ignore - - async def recover_system_partitions( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Indicates to the 
Service Fabric cluster that it should attempt to recover the system services that are currently stuck in quorum loss. - - Indicates to the Service Fabric cluster that it should attempt to recover the system services - that are currently stuck in quorum loss. This operation should only be performed if it is known - that the replicas that are down cannot be recovered. Incorrect use of this API can cause - potential data loss. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_system_partitions.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} # type: ignore - - async def recover_all_partitions( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Indicates to the Service Fabric cluster that it should attempt to recover any services (including system services) which are currently stuck in quorum loss. - - This operation should only be performed if it is known that the replicas that are down cannot - be recovered. Incorrect use of this API can cause potential data loss. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_all_partitions.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} # type: ignore - - async def move_primary_replica( - self, - partition_id: str, - node_name: Optional[str] = None, - ignore_constraints: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Moves the primary replica of a partition of a stateful service. 
- - This command moves the primary replica of a partition of a stateful service, respecting all - constraints. - If NodeName parameter is specified, primary will be moved to the specified node (if constraints - allow it). - If NodeName parameter is not specified, primary replica will be moved to a random node in the - cluster. - If IgnoreConstraints parameter is specified and set to true, then primary will be moved - regardless of the constraints. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param node_name: The name of the node. - :type node_name: str - :param ignore_constraints: Ignore constraints when moving a replica or instance. If this - parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.move_primary_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if node_name is not None: - query_parameters['NodeName'] = self._serialize.query("node_name", node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return 
cls(pipeline_response, None, {}) - - move_primary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MovePrimaryReplica'} # type: ignore - - async def move_secondary_replica( - self, - partition_id: str, - current_node_name: str, - new_node_name: Optional[str] = None, - ignore_constraints: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Moves the secondary replica of a partition of a stateful service. - - This command moves the secondary replica of a partition of a stateful service, respecting all - constraints. - CurrentNodeName parameter must be specified to identify the replica that is moved. - Source node name must be specified, but new node name can be omitted, and in that case replica - is moved to a random node. - If IgnoreConstraints parameter is specified and set to true, then secondary will be moved - regardless of the constraints. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param current_node_name: The name of the source node for secondary replica move. - :type current_node_name: str - :param new_node_name: The name of the target node for secondary replica or instance move. If - not specified, replica or instance is moved to a random node. - :type new_node_name: str - :param ignore_constraints: Ignore constraints when moving a replica or instance. If this - parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.move_secondary_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') - if new_node_name is not None: - query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - move_secondary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MoveSecondaryReplica'} # type: ignore - - async def update_partition_load( - self, - partition_metric_load_description_list: List["_models.PartitionMetricLoadDescription"], - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedUpdatePartitionLoadResultList": - """Update the loads of provided partitions for specific metrics. - - Updates the load value and predicted load value for all the partitions provided for specified - metrics. - - :param partition_metric_load_description_list: Description of updating load for list of - partitions. - :type partition_metric_load_description_list: list[~azure.servicefabric.models.PartitionMetricLoadDescription] - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. 
If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedUpdatePartitionLoadResultList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedUpdatePartitionLoadResultList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedUpdatePartitionLoadResultList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_partition_load.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(partition_metric_load_description_list, '[PartitionMetricLoadDescription]') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedUpdatePartitionLoadResultList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - update_partition_load.metadata = {'url': '/$/UpdatePartitionLoad'} # type: ignore - - async def move_instance( - self, - service_id: str, - partition_id: str, - current_node_name: Optional[str] = None, - new_node_name: Optional[str] = None, - ignore_constraints: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Moves the instance of a partition of a stateless service. - - This command moves the instance of a partition of a stateless service, respecting all - constraints. - Partition id and service name must be specified to be able to move the instance. - CurrentNodeName when specified identifies the instance that is moved. If not specified, random - instance will be moved - New node name can be omitted, and in that case instance is moved to a random node. - If IgnoreConstraints parameter is specified and set to true, then instance will be moved - regardless of the constraints. - - :param service_id: The identity of the service. 
This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param current_node_name: The name of the source node for instance move. If not specified, - instance is moved from a random node. - :type current_node_name: str - :param new_node_name: The name of the target node for secondary replica or instance move. If - not specified, replica or instance is moved to a random node. - :type new_node_name: str - :param ignore_constraints: Ignore constraints when moving a replica or instance. If this - parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.move_instance.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if current_node_name is not None: - query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') - if new_node_name is not None: - query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - move_instance.metadata = {'url': '/Services/{serviceId}/$/GetPartitions/{partitionId}/$/MoveInstance'} # type: ignore - - async def create_repair_task( - self, - repair_task: "_models.RepairTask", - **kwargs - ) -> "_models.RepairTaskUpdateInfo": - """Creates a new repair task. - - For clusters that have the Repair Manager Service configured, - this API provides a way to create repair tasks that run automatically or manually. - For repair tasks that run automatically, an appropriate repair executor - must be running for each repair action to run automatically. - These are currently only available in specially-configured Azure Cloud Services. - - To create a manual repair task, provide the set of impacted node names and the - expected impact. When the state of the created repair task changes to approved, - you can safely perform repair actions on those nodes. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task: Describes the repair task to be created or updated. 
- :type repair_task: ~azure.servicefabric.models.RepairTask - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task, 'RepairTask') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return 
deserialized - create_repair_task.metadata = {'url': '/$/CreateRepairTask'} # type: ignore - - async def cancel_repair_task( - self, - repair_task_cancel_description: "_models.RepairTaskCancelDescription", - **kwargs - ) -> "_models.RepairTaskUpdateInfo": - """Requests the cancellation of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task_cancel_description: Describes the repair task to be cancelled. - :type repair_task_cancel_description: ~azure.servicefabric.models.RepairTaskCancelDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.cancel_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, 
**body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'} # type: ignore - - async def delete_repair_task( - self, - task_id: str, - version: Optional[str] = None, - **kwargs - ) -> None: - """Deletes a completed repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param task_id: The ID of the completed repair task to be deleted. - :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. 
- :type version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _repair_task_delete_description = _models.RepairTaskDeleteDescription(task_id=task_id, version=version) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.delete_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_repair_task_delete_description, 'RepairTaskDeleteDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'} # type: ignore - - 
async def get_repair_task_list( - self, - task_id_filter: Optional[str] = None, - state_filter: Optional[int] = None, - executor_filter: Optional[str] = None, - **kwargs - ) -> List["_models.RepairTask"]: - """Gets a list of repair tasks matching the given filters. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param task_id_filter: The repair task ID prefix to be matched. - :type task_id_filter: str - :param state_filter: A bitwise-OR of the following values, specifying which task states should - be included in the result list. - - - * 1 - Created - * 2 - Claimed - * 4 - Preparing - * 8 - Approved - * 16 - Executing - * 32 - Restoring - * 64 - Completed. - :type state_filter: int - :param executor_filter: The name of the repair executor whose claimed tasks should be included - in the list. - :type executor_filter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of RepairTask, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.RepairTask] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.RepairTask"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_repair_task_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if task_id_filter is not None: - query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str') - if state_filter is not None: - query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') - if executor_filter is not None: - 
query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[RepairTask]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'} # type: ignore - - async def force_approve_repair_task( - self, - task_id: str, - version: Optional[str] = None, - **kwargs - ) -> "_models.RepairTaskUpdateInfo": - """Forces the approval of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param task_id: The ID of the repair task. - :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. 
- :type version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _repair_task_approve_description = _models.RepairTaskApproveDescription(task_id=task_id, version=version) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.force_approve_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_repair_task_approve_description, 'RepairTaskApproveDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'} # type: ignore - - async def update_repair_task_health_policy( - self, - repair_task_update_health_policy_description: "_models.RepairTaskUpdateHealthPolicyDescription", - **kwargs - ) -> "_models.RepairTaskUpdateInfo": - """Updates the health policy of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task_update_health_policy_description: Describes the repair task healthy policy - to be updated. - :type repair_task_update_health_policy_description: ~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_repair_task_health_policy.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'} # type: ignore - - async def update_repair_execution_state( - self, - repair_task: "_models.RepairTask", - **kwargs - ) -> "_models.RepairTaskUpdateInfo": - """Updates the execution state of a repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task: Describes the repair task to be created or updated. 
- :type repair_task: ~azure.servicefabric.models.RepairTask - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_repair_execution_state.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task, 'RepairTask') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - 
return deserialized - update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'} # type: ignore - - async def get_replica_info_list( - self, - partition_id: str, - continuation_token_parameter: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedReplicaInfoList": - """Gets the information about replicas of a Service Fabric service partition. - - The GetReplicas endpoint returns information about the replicas of the specified partition. The - response includes the ID, role, status, health, node name, uptime, and other details about the - replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedReplicaInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedReplicaInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedReplicaInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_replica_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PagedReplicaInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'} # type: ignore - - async def get_replica_info( - self, - partition_id: str, - replica_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional["_models.ReplicaInfo"]: - """Gets the information about a replica of a Service Fabric partition. - - The response includes the ID, role, status, health, node name, uptime, and other details about - the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ReplicaInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ReplicaInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicaInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_replica_info.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ReplicaInfo', pipeline_response) - - if cls: 
- return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'} # type: ignore - - async def get_replica_health( - self, - partition_id: str, - replica_id: str, - events_health_state_filter: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ReplicaHealth": - """Gets the health of a Service Fabric stateful service replica or stateless service instance. - - Gets the health of a Service Fabric replica. - Use EventsHealthStateFilter to filter the collection of health events reported on the replica - based on the health state. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. 
- * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ReplicaHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ReplicaHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicaHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_replica_health.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, 
header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ReplicaHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore - - async def get_replica_health_using_policy( - self, - partition_id: str, - replica_id: str, - events_health_state_filter: Optional[int] = 0, - timeout: Optional[int] = 60, - application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, - **kwargs - ) -> "_models.ReplicaHealth": - """Gets the health of a Service Fabric stateful service replica or stateless service instance using the specified policy. - - Gets the health of a Service Fabric stateful service replica or stateless service instance. - Use EventsHealthStateFilter to filter the collection of health events reported on the cluster - based on the health state. - Use ApplicationHealthPolicy to optionally override the health policies used to evaluate the - health. This API only uses 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The - rest of the fields are ignored while evaluating the health of the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. 
- Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. 
- :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ReplicaHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ReplicaHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicaHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_replica_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - 
body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ReplicaHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore - - async def report_replica_health( - self, - partition_id: str, - replica_id: str, - health_information: "_models.HealthInformation", - service_kind: Union[str, "_models.ReplicaHealthReportServiceKind"] = "Stateful", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric replica. - - Reports health state of the specified Service Fabric replica. The report must contain the - information about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway Replica, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetReplicaHealth and check that - the report appears in the HealthEvents section. - - :param partition_id: The identity of the partition. 
- :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param service_kind: The kind of service replica (Stateless or Stateful) for which the health - is being reported. Following are the possible values. - :type service_kind: str or ~azure.servicefabric.models.ReplicaHealthReportServiceKind - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_replica_health.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} # type: ignore - - async def get_deployed_service_replica_info_list( - self, - node_name: str, - application_id: str, - partition_id: Optional[str] = None, - service_manifest_name: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional[List["_models.DeployedServiceReplicaInfo"]]: - """Gets the list of replicas deployed on a Service Fabric node. - - Gets the list containing the information about replicas deployed on a Service Fabric node. The - information include partition ID, replica ID, status of the replica, name of the service, name - of the service type, and other information. Use PartitionId or ServiceManifestName query - parameters to return information about the deployed replicas matching the specified values for - those parameters. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param partition_id: The identity of the partition. 
- :type partition_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServiceReplicaInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo] or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceReplicaInfo"]]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_replica_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_id is not None: - query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceReplicaInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} # type: ignore - - async def get_deployed_service_replica_detail_info( - self, - node_name: str, - partition_id: str, - replica_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.DeployedServiceReplicaDetailInfo": - """Gets the details of replica deployed on a Service Fabric node. - - Gets the details of the replica deployed on a Service Fabric node. The information includes - service kind, service name, current service operation, current service operation start date - time, partition ID, replica/instance ID, reported load, and other information. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_replica_detail_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} # type: ignore - - async def get_deployed_service_replica_detail_info_by_partition_id( - self, - node_name: str, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.DeployedServiceReplicaDetailInfo": - """Gets the details of replica deployed on a Service Fabric node. - - Gets the details of the replica deployed on a Service Fabric node. The information includes - service kind, service name, current service operation, current service operation start date - time, partition ID, replica/instance ID, reported load, and other information. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', 
pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'} # type: ignore - - async def restart_replica( - self, - node_name: str, - partition_id: str, - replica_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Restarts a service replica of a persisted service running on a node. - - Restarts a service replica of a persisted service running on a node. Warning - There are no - safety checks performed when this API is used. Incorrect use of this API can lead to - availability loss for stateful services. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.restart_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restart_replica.metadata = {'url': 
'/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} # type: ignore - - async def remove_replica( - self, - node_name: str, - partition_id: str, - replica_id: str, - force_remove: Optional[bool] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Removes a service replica running on a node. - - This API simulates a Service Fabric replica failure by removing a replica from a Service Fabric - cluster. The removal closes the replica, transitions the replica to the role None, and then - removes all of the state information of the replica from the cluster. This API tests the - replica state removal path, and simulates the report fault permanent path through client APIs. - Warning - There are no safety checks performed when this API is used. Incorrect use of this API - can lead to data loss for stateful services. In addition, the forceRemove flag impacts all - other replicas hosted in the same process. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param force_remove: Remove a Service Fabric application or service forcefully without going - through the graceful shutdown sequence. This parameter can be used to forcefully delete an - application or service for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.remove_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return 
cls(pipeline_response, None, {}) - - remove_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} # type: ignore - - async def get_deployed_service_package_info_list( - self, - node_name: str, - application_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.DeployedServicePackageInfo"]: - """Gets the list of service packages deployed on a Service Fabric node. - - Returns the information about the service packages deployed on a Service Fabric node for the - given application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServicePackageInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServicePackageInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) - 
- if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} # type: ignore - - async def get_deployed_service_package_info_list_by_name( - self, - node_name: str, - application_id: str, - service_package_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> Optional[List["_models.DeployedServicePackageInfo"]]: - """Gets the list of service packages deployed on a Service Fabric node matching exactly the specified name. - - Returns the information about the service packages deployed on a Service Fabric node for the - given application. These results are of service packages whose name match exactly the service - package name specified as the parameter. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServicePackageInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServicePackageInfo"]]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_info_list_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - 
raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'} # type: ignore - - async def get_deployed_service_package_health( - self, - node_name: str, - application_id: str, - service_package_name: str, - events_health_state_filter: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.DeployedServicePackageHealth": - """Gets the information about health of a service package for a specific application deployed for a Service Fabric node and application. - - Gets the information about health of a service package for a specific application deployed on a - Service Fabric node. Use EventsHealthStateFilter to optionally filter for the collection of - HealthEvent objects reported on the deployed service package based on health state. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. 
- Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServicePackageHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore - - async def get_deployed_service_package_health_using_policy( - self, - node_name: str, - application_id: str, - service_package_name: str, - events_health_state_filter: Optional[int] = 0, - timeout: Optional[int] = 60, - application_health_policy: Optional["_models.ApplicationHealthPolicy"] = None, - **kwargs - ) -> "_models.DeployedServicePackageHealth": - """Gets the information about health of service package for a specific application deployed on a Service Fabric node using the specified policy. - - Gets the information about health of a service package for a specific application deployed on a - Service Fabric node. using the specified policy. Use EventsHealthStateFilter to optionally - filter for the collection of HealthEvent objects reported on the deployed service package based - on health state. Use ApplicationHealthPolicy to optionally override the health policies used to - evaluate the health. This API only uses 'ConsiderWarningAsError' field of the - ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the - deployed service package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. 
- For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. 
- If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServicePackageHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore - - async def report_deployed_service_package_health( - self, - node_name: str, - application_id: str, - service_package_name: str, - health_information: "_models.HealthInformation", - immediate: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Sends a health report on the Service Fabric deployed service package. - - Reports health state of the service package of the application deployed on a Service Fabric - node. The report must contain the information about the source of the health report and - property on which it is reported. - The report is sent to a Service Fabric gateway Service, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. 
- For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, get deployed service package health - and check that the report appears in the HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. 
- This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_deployed_service_package_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: 
Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} # type: ignore - - async def deploy_service_package_to_node( - self, - node_name: str, - deploy_service_package_to_node_description: "_models.DeployServicePackageToNodeDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Downloads all of the code packages associated with specified service manifest on the specified node. - - This API provides a way to download code packages including the container images on a specific - node outside of the normal application deployment and upgrade path. This is useful for the - large code packages and container images to be present on the node before the actual - application deployment and upgrade, thus significantly reducing the total time required for the - deployment or upgrade. - - :param node_name: The name of the node. 
- :type node_name: str - :param deploy_service_package_to_node_description: Describes information for deploying a - service package to a Service Fabric node. - :type deploy_service_package_to_node_description: ~azure.servicefabric.models.DeployServicePackageToNodeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.deploy_service_package_to_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = 
self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} # type: ignore - - async def get_deployed_code_package_info_list( - self, - node_name: str, - application_id: str, - service_manifest_name: Optional[str] = None, - code_package_name: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.DeployedCodePackageInfo"]: - """Gets the list of code packages deployed on a Service Fabric node. - - Gets the list of code packages deployed on a Service Fabric node for the given application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. 
- :type service_manifest_name: str - :param code_package_name: The name of code package specified in service manifest registered as - part of an application type in a Service Fabric cluster. - :type code_package_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedCodePackageInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedCodePackageInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_code_package_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if code_package_name is not None: - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[DeployedCodePackageInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_code_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} # type: ignore - - async def restart_deployed_code_package( - self, - node_name: str, - application_id: str, - restart_deployed_code_package_description: "_models.RestartDeployedCodePackageDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Restarts a code package deployed on a Service Fabric node in a cluster. - - Restarts a code package deployed on a Service Fabric node in a cluster. This aborts the code - package process, which will restart all the user service replicas hosted in that process. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
- :type application_id: str - :param restart_deployed_code_package_description: Describes the deployed code package on - Service Fabric node to restart. - :type restart_deployed_code_package_description: ~azure.servicefabric.models.RestartDeployedCodePackageDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.restart_deployed_code_package.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} # type: ignore - - async def get_container_logs_deployed_on_node( - self, - node_name: str, - application_id: str, - service_manifest_name: str, - code_package_name: str, - tail: Optional[str] = None, - previous: Optional[bool] = False, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ContainerLogs": - """Gets the container logs for container deployed on a Service Fabric node. - - Gets the container logs for container deployed on a Service Fabric node for the given code - package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
- :type application_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in service manifest registered as - part of an application type in a Service Fabric cluster. - :type code_package_name: str - :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show - the complete logs. - :type tail: str - :param previous: Specifies whether to get container logs from exited/dead containers of the - code package instance. - :type previous: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ContainerLogs, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ContainerLogs - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_container_logs_deployed_on_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - 
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - if tail is not None: - query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') - if previous is not None: - query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ContainerLogs', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} # type: ignore - - async def invoke_container_api( - self, - node_name: str, - application_id: str, - service_manifest_name: str, - code_package_name: str, - code_package_instance_id: str, - container_api_request_body: "_models.ContainerApiRequestBody", - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ContainerApiResponse": - """Invoke container API on a container deployed on a Service Fabric node. 
- - Invoke container API on a container deployed on a Service Fabric node for the given code - package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in service manifest registered as - part of an application type in a Service Fabric cluster. - :type code_package_name: str - :param code_package_instance_id: ID that uniquely identifies a code package instance deployed - on a service fabric node. - :type code_package_instance_id: str - :param container_api_request_body: Parameters for making container API call. - :type container_api_request_body: ~azure.servicefabric.models.ContainerApiRequestBody - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ContainerApiResponse, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ContainerApiResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApiResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.invoke_container_api.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = 
self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ContainerApiResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} # type: ignore - - async def create_compose_deployment( - self, - create_compose_deployment_description: "_models.CreateComposeDeploymentDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Creates a Service Fabric compose deployment. - - Compose is a file format that describes multi-container applications. This API allows deploying - container based applications defined in compose format in a Service Fabric cluster. Once the - deployment is created, its status can be tracked via the ``GetComposeDeploymentStatus`` API. - - :param create_compose_deployment_description: Describes the compose deployment that needs to be - created. - :type create_compose_deployment_description: ~azure.servicefabric.models.CreateComposeDeploymentDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_compose_deployment.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - 
create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} # type: ignore - - async def get_compose_deployment_status( - self, - deployment_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ComposeDeploymentStatusInfo": - """Gets information about a Service Fabric compose deployment. - - Returns the status of the compose deployment that was created or in the process of being - created in the Service Fabric cluster and whose name matches the one specified as the - parameter. The response includes the name, status, and other details about the deployment. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ComposeDeploymentStatusInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentStatusInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_compose_deployment_status.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if 
timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ComposeDeploymentStatusInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_compose_deployment_status.metadata = {'url': '/ComposeDeployments/{deploymentName}'} # type: ignore - - async def get_compose_deployment_status_list( - self, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedComposeDeploymentStatusInfoList": - """Gets the list of compose deployments created in the Service Fabric cluster. - - Gets the status about the compose deployments that were created or in the process of being - created in the Service Fabric cluster. The response includes the name, status, and other - details about the compose deployments. If the list of deployments do not fit in a page, one - page of results is returned as well as a continuation token, which can be used to get the next - page. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. 
A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedComposeDeploymentStatusInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedComposeDeploymentStatusInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_compose_deployment_status_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - 
deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} # type: ignore - - async def get_compose_deployment_upgrade_progress( - self, - deployment_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ComposeDeploymentUpgradeProgressInfo": - """Gets details for the latest upgrade performed on this Service Fabric compose deployment. - - Returns the information about the state of the compose deployment upgrade along with details to - aid debugging application health issues. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ComposeDeploymentUpgradeProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentUpgradeProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_compose_deployment_upgrade_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, 
deserialized, {}) - - return deserialized - get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} # type: ignore - - async def remove_compose_deployment( - self, - deployment_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes an existing Service Fabric compose deployment from cluster. - - Deletes an existing Service Fabric compose deployment. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.remove_compose_deployment.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} # type: ignore - - async def start_compose_deployment_upgrade( - self, - deployment_name: str, - compose_deployment_upgrade_description: "_models.ComposeDeploymentUpgradeDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Starts upgrading a compose deployment in the Service Fabric cluster. - - Validates the supplied upgrade parameters and starts upgrading the deployment if the parameters - are valid. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param compose_deployment_upgrade_description: Parameters for upgrading compose deployment. - :type compose_deployment_upgrade_description: ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_compose_deployment_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} # type: ignore - - async def start_rollback_compose_deployment_upgrade( - self, - deployment_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Starts rolling back a compose deployment upgrade in the Service Fabric cluster. - - Rollback a service fabric compose deployment upgrade. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_rollback_compose_deployment_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_rollback_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/RollbackUpgrade'} # type: ignore - - async def get_chaos( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.Chaos": - """Get the status of Chaos. - - Get the status of Chaos indicating whether or not Chaos is running, the Chaos parameters used - for running Chaos and the status of the Chaos Schedule. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: Chaos, or the result of cls(response) - :rtype: ~azure.servicefabric.models.Chaos - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.Chaos"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_chaos.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('Chaos', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_chaos.metadata = {'url': '/Tools/Chaos'} # type: ignore - - async def start_chaos( - self, - chaos_parameters: "_models.ChaosParameters", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Starts Chaos in the cluster. 
- - If Chaos is not already running in the cluster, it starts Chaos with the passed in Chaos - parameters. - If Chaos is already running when this call is made, the call fails with the error code - FABRIC_E_CHAOS_ALREADY_RUNNING. - Refer to the article `Induce controlled Chaos in Service Fabric clusters - `_ for more - details. - - :param chaos_parameters: Describes all the parameters to configure a Chaos run. - :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_chaos.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = 
self._serialize.body(chaos_parameters, 'ChaosParameters') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'} # type: ignore - - async def stop_chaos( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Stops Chaos if it is running in the cluster and put the Chaos Schedule in a stopped state. - - Stops Chaos from executing new faults. In-flight faults will continue to execute until they are - complete. The current Chaos Schedule is put into a stopped state. - Once a schedule is stopped, it will stay in the stopped state and not be used to Chaos Schedule - new runs of Chaos. A new Chaos Schedule must be set in order to resume scheduling. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.stop_chaos.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'} # type: ignore - - async def get_chaos_events( - self, - continuation_token_parameter: Optional[str] = None, - start_time_utc: Optional[str] = None, - end_time_utc: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ChaosEventsSegment": - """Gets the next segment of the Chaos events based on the continuation token or the 
time range. - - To get the next segment of the Chaos events, you can specify the ContinuationToken. To get the - start of a new segment of Chaos events, you can specify the time range - through StartTimeUtc and EndTimeUtc. You cannot specify both the ContinuationToken and the time - range in the same call. - When there are more than 100 Chaos events, the Chaos events are returned in multiple segments - where a segment contains no more than 100 Chaos events and to get the next segment you make a - call to this API with the continuation token. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param start_time_utc: The Windows file time representing the start time of the time range for - which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method - `_.aspx) for - details. - :type start_time_utc: str - :param end_time_utc: The Windows file time representing the end time of the time range for - which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method - `_.aspx) for - details. - :type end_time_utc: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. 
If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ChaosEventsSegment, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ChaosEventsSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosEventsSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_chaos_events.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if start_time_utc is not None: - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - if end_time_utc is not None: - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - 
header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ChaosEventsSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'} # type: ignore - - async def get_chaos_schedule( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ChaosScheduleDescription": - """Get the Chaos Schedule defining when and how to run Chaos. - - Gets the version of the Chaos Schedule in use and the Chaos Schedule that defines when and how - to run Chaos. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ChaosScheduleDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ChaosScheduleDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosScheduleDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_chaos_schedule.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ChaosScheduleDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore - - async def post_chaos_schedule( - self, - timeout: Optional[int] = 60, - version: Optional[int] = None, - schedule: 
Optional["_models.ChaosSchedule"] = None, - **kwargs - ) -> None: - """Set the schedule used by Chaos. - - Chaos will automatically schedule runs based on the Chaos Schedule. - The Chaos Schedule will be updated if the provided version matches the version on the server. - When updating the Chaos Schedule, the version on the server is incremented by 1. - The version on the server will wrap back to 0 after reaching a large number. - If Chaos is running when this call is made, the call will fail. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param version: The version number of the Schedule. - :type version: int - :param schedule: Defines the schedule used by Chaos. - :type schedule: ~azure.servicefabric.models.ChaosSchedule - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _chaos_schedule = _models.ChaosScheduleDescription(version=version, schedule=schedule) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.post_chaos_schedule.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct 
headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_chaos_schedule, 'ChaosScheduleDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore - - async def upload_file( - self, - content_path: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Uploads contents of the file to the image store. - - Uploads contents of the file to the image store. Use this API if the file is small enough to - upload again if the connection fails. The file's data needs to be added to the request body. - The contents will be uploaded to the specified path. Image store service uses a mark file to - indicate the availability of the folder. The mark file is an empty file named "_.dir". The mark - file is generated by the image store service when all files in a folder are uploaded. When - using File-by-File approach to upload application package in REST, the image store service - isn't aware of the file hierarchy of the application package; you need to create a mark file - per folder and upload it last, to let the image store service know that the folder is complete. 
- - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.upload_file.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - upload_file.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore - - async def get_image_store_content( - self, - content_path: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ImageStoreContent": - """Gets the image store content information. - - Returns the information about the image store content at the specified contentPath. The - contentPath is relative to the root of the image store. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ImageStoreContent, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ImageStoreContent - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_content.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', 
maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ImageStoreContent', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore - - async def delete_image_store_content( - self, - content_path: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes existing image store content. - - Deletes existing image store content being found within the given image store relative path. - This command can be used to delete uploaded application packages once they are provisioned. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_image_store_content.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore - - async def get_image_store_root_content( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ImageStoreContent": - """Gets the content information 
at the root of the image store. - - Returns the information about the image store content at the root of the image store. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ImageStoreContent, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ImageStoreContent - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_root_content.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('ImageStoreContent', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_root_content.metadata = {'url': '/ImageStore'} # type: ignore - - async def copy_image_store_content( - self, - image_store_copy_description: "_models.ImageStoreCopyDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Copies image store content internally. - - Copies the image store content from the source image store relative path to the destination - image store relative path. - - :param image_store_copy_description: Describes the copy description for the image store. - :type image_store_copy_description: ~azure.servicefabric.models.ImageStoreCopyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.copy_image_store_content.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters 
= {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'} # type: ignore - - async def delete_image_store_upload_session( - self, - session_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Cancels an image store upload session. - - The DELETE request will cause the existing upload session to expire and remove any previously - uploaded file chunks. - - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_image_store_upload_session.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'} # type: ignore - - async def commit_image_store_upload_session( - self, - session_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Commit an image store upload session. 
- - When all file chunks have been uploaded, the upload session needs to be committed explicitly to - complete the upload. Image store preserves the upload session until the expiration time, which - is 30 minutes after the last chunk received. - - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.commit_image_store_upload_session.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} # type: ignore - - async def get_image_store_upload_session_by_id( - self, - session_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.UploadSession": - """Get the image store upload session by ID. - - Gets the image store upload session identified by the given ID. User can query the upload - session at any time during uploading. - - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UploadSession, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UploadSession - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_upload_session_by_id.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UploadSession', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} # type: ignore - - async def 
get_image_store_upload_session_by_path( - self, - content_path: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.UploadSession": - """Get the image store upload session by relative path. - - Gets the image store upload session associated with the given image store relative path. User - can query the upload session at any time during uploading. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UploadSession, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UploadSession - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_upload_session_by_path.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UploadSession', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} # type: ignore - - async def upload_file_chunk( - self, - content_path: str, - session_id: str, - content_range: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Uploads a file chunk to the image store relative path. - - Uploads a file chunk to the image store with the specified upload session ID and image store - relative path. This API allows user to resume the file upload operation. user doesn't have to - restart the file upload from scratch whenever there is a network interruption. Use this option - if the file size is large. - - To perform a resumable file upload, user need to break the file into multiple chunks and upload - these chunks to the image store one-by-one. Chunks don't have to be uploaded in order. If the - file represented by the image store relative path already exists, it will be overwritten when - the upload session commits. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. 
- :type session_id: str - :param content_range: When uploading file chunks to the image store, the Content-Range header - field need to be configured and sent with a request. The format should looks like "bytes - {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For example, Content-Range:bytes - 300-5000/20000 indicates that user is sending bytes 300 through 5,000 and the total file length - is 20,000 bytes. - :type content_range: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.upload_file_chunk.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Range'] = self._serialize.header("content_range", 
content_range, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} # type: ignore - - async def get_image_store_root_folder_size( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.FolderSizeInfo": - """Get the folder size at the root of the image store. - - Returns the total size of files at the root and children folders in image store. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FolderSizeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.FolderSizeInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_root_folder_size.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('FolderSizeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_root_folder_size.metadata = {'url': '/ImageStore/$/FolderSize'} # type: ignore - - async def get_image_store_folder_size( - self, - content_path: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.FolderSizeInfo": - 
"""Get the size of a folder in image store. - - Gets the total size of file under a image store folder, specified by contentPath. The - contentPath is relative to the root of the image store. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FolderSizeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.FolderSizeInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_folder_size.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('FolderSizeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_folder_size.metadata = {'url': '/ImageStore/{contentPath}/$/FolderSize'} # type: ignore - - async def get_image_store_info( - self, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.ImageStoreInfo": - """Gets the overall ImageStore information. - - Returns information about the primary ImageStore replica, such as disk capacity and available - disk space at the node it is on, and several categories of the ImageStore's file system usage. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ImageStoreInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ImageStoreInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_info.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ImageStoreInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_info.metadata = {'url': '/ImageStore/$/Info'} # type: ignore - - async def invoke_infrastructure_command( - self, - command: str, - service_id: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> IO: - """Invokes an 
administrative command on the given Infrastructure Service instance. - - For clusters that have one or more instances of the Infrastructure Service configured, - this API provides a way to send infrastructure-specific commands to a particular - instance of the Infrastructure Service. - - Available commands and their corresponding response formats vary depending upon - the infrastructure on which the cluster is running. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param command: The text of the command to be invoked. The content of the command is - infrastructure-specific. - :type command: str - :param service_id: The identity of the infrastructure service. This is the full name of the - infrastructure service without the 'fabric:' URI scheme. This parameter required only for the - cluster that has more than one instance of infrastructure service running. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.invoke_infrastructure_command.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Command'] = self._serialize.query("command", command, 'str') - if service_id is not None: - query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} # type: ignore - - async def 
invoke_infrastructure_query( - self, - command: str, - service_id: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> IO: - """Invokes a read-only query on the given infrastructure service instance. - - For clusters that have one or more instances of the Infrastructure Service configured, - this API provides a way to send infrastructure-specific queries to a particular - instance of the Infrastructure Service. - - Available commands and their corresponding response formats vary depending upon - the infrastructure on which the cluster is running. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param command: The text of the command to be invoked. The content of the command is - infrastructure-specific. - :type command: str - :param service_id: The identity of the infrastructure service. This is the full name of the - infrastructure service without the 'fabric:' URI scheme. This parameter required only for the - cluster that has more than one instance of infrastructure service running. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.invoke_infrastructure_query.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Command'] = self._serialize.query("command", command, 'str') - if service_id is not None: - query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} # type: ignore - - async def start_data_loss( - self, - 
service_id: str, - partition_id: str, - operation_id: str, - data_loss_mode: Union[str, "_models.DataLossMode"], - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """This API will induce data loss for the specified partition. It will trigger a call to the OnDataLossAsync API of the partition. - - This API will induce data loss for the specified partition. It will trigger a call to the - OnDataLoss API of the partition. - Actual data loss will depend on the specified DataLossMode. - - - * PartialDataLoss - Only a quorum of replicas are removed and OnDataLoss is triggered for the - partition but actual data loss depends on the presence of in-flight replication. - * FullDataLoss - All replicas are removed hence all data is lost and OnDataLoss is triggered. - - This API should only be called with a stateful service as the target. - - Calling this API with a system service as the target is not advised. - - Note: Once this API has been called, it cannot be reversed. Calling CancelOperation will only - stop execution and clean up internal system state. - It will not restore data if the command has progressed far enough to cause data loss. - - Call the GetDataLossProgress API with the same OperationId to return information on the - operation started with this API. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. 
- :type operation_id: str - :param data_loss_mode: This enum is passed to the StartDataLoss API to indicate what type of - data loss to induce. - :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_data_loss.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} # type: ignore - - async def get_data_loss_progress( - self, - service_id: str, - partition_id: str, - operation_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PartitionDataLossProgress": - """Gets the progress of a partition data loss operation started using the StartDataLoss API. - - Gets the progress of a data loss operation started with StartDataLoss, using the OperationId. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionDataLossProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionDataLossProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionDataLossProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_data_loss_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PartitionDataLossProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} # type: ignore - - async def start_quorum_loss( - self, - service_id: str, - partition_id: str, - operation_id: str, - quorum_loss_mode: Union[str, "_models.QuorumLossMode"], - quorum_loss_duration: int, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Induces quorum loss for a given stateful service partition. - - This API is useful for a temporary quorum loss situation on your service. - - Call the GetQuorumLossProgress API with the same OperationId to return information on the - operation started with this API. - - This can only be called on stateful persisted (HasPersistedState==true) services. Do not use - this API on stateless services or stateful in-memory only services. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param quorum_loss_mode: This enum is passed to the StartQuorumLoss API to indicate what type - of quorum loss to induce. - :type quorum_loss_mode: str or ~azure.servicefabric.models.QuorumLossMode - :param quorum_loss_duration: The amount of time for which the partition will be kept in quorum - loss. This must be specified in seconds. 
- :type quorum_loss_duration: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_quorum_loss.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str') - query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} # type: ignore - - async def get_quorum_loss_progress( - self, - service_id: str, - partition_id: str, - operation_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PartitionQuorumLossProgress": - """Gets the progress of a quorum loss operation on a partition started using the StartQuorumLoss API. - - Gets the progress of a quorum loss operation started with StartQuorumLoss, using the provided - OperationId. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionQuorumLossProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionQuorumLossProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_quorum_loss_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - 
deserialized = self._deserialize('PartitionQuorumLossProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} # type: ignore - - async def start_partition_restart( - self, - service_id: str, - partition_id: str, - operation_id: str, - restart_partition_mode: Union[str, "_models.RestartPartitionMode"], - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """This API will restart some or all replicas or instances of the specified partition. - - This API is useful for testing failover. - - If used to target a stateless service partition, RestartPartitionMode must be - AllReplicasOrInstances. - - Call the GetPartitionRestartProgress API using the same OperationId to get the progress. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param restart_partition_mode: Describe which partitions to restart. - :type restart_partition_mode: str or ~azure.servicefabric.models.RestartPartitionMode - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_partition_restart.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, 
model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} # type: ignore - - async def get_partition_restart_progress( - self, - service_id: str, - partition_id: str, - operation_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PartitionRestartProgress": - """Gets the progress of a PartitionRestart operation started using StartPartitionRestart. - - Gets the progress of a PartitionRestart started with StartPartitionRestart using the provided - OperationId. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionRestartProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionRestartProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionRestartProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_restart_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - 
deserialized = self._deserialize('PartitionRestartProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} # type: ignore - - async def start_node_transition( - self, - node_name: str, - operation_id: str, - node_transition_type: Union[str, "_models.NodeTransitionType"], - node_instance_id: str, - stop_duration_in_seconds: int, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Starts or stops a cluster node. - - Starts or stops a cluster node. A cluster node is a process, not the OS instance itself. To - start a node, pass in "Start" for the NodeTransitionType parameter. - To stop a node, pass in "Stop" for the NodeTransitionType parameter. This API starts the - operation - when the API returns the node may not have finished transitioning yet. - Call GetNodeTransitionProgress with the same OperationId to get the progress of the operation. - - :param node_name: The name of the node. - :type node_name: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param node_transition_type: Indicates the type of transition to perform. - NodeTransitionType.Start will start a stopped node. NodeTransitionType.Stop will stop a node - that is up. - :type node_transition_type: str or ~azure.servicefabric.models.NodeTransitionType - :param node_instance_id: The node instance ID of the target node. This can be determined - through GetNodeInfo API. - :type node_instance_id: str - :param stop_duration_in_seconds: The duration, in seconds, to keep the node stopped. The - minimum value is 600, the maximum is 14400. After this time expires, the node will - automatically come back up. 
- :type stop_duration_in_seconds: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_node_transition.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str') - query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str') - query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, 
query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} # type: ignore - - async def get_node_transition_progress( - self, - node_name: str, - operation_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.NodeTransitionProgress": - """Gets the progress of an operation started using StartNodeTransition. - - Gets the progress of an operation started with StartNodeTransition using the provided - OperationId. - - :param node_name: The name of the node. - :type node_name: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeTransitionProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeTransitionProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeTransitionProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_transition_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeTransitionProgress', pipeline_response) - - if cls: - return cls(pipeline_response, 
deserialized, {}) - - return deserialized - get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} # type: ignore - - async def get_fault_operation_list( - self, - type_filter: int = 65535, - state_filter: int = 65535, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.OperationStatus"]: - """Gets a list of user-induced fault operations filtered by provided input. - - Gets the list of user-induced fault operations filtered by provided input. - - :param type_filter: Used to filter on OperationType for user-induced operations. - - - * 65535 - select all - * 1 - select PartitionDataLoss. - * 2 - select PartitionQuorumLoss. - * 4 - select PartitionRestart. - * 8 - select NodeTransition. - :type type_filter: int - :param state_filter: Used to filter on OperationState's for user-induced operations. - - - * 65535 - select All - * 1 - select Running - * 2 - select RollingBack - * 8 - select Completed - * 16 - select Faulted - * 32 - select Cancelled - * 64 - select ForceCancelled. - :type state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of OperationStatus, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.OperationStatus] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.OperationStatus"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_fault_operation_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int') - query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[OperationStatus]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_fault_operation_list.metadata = 
{'url': '/Faults/'} # type: ignore - - async def cancel_operation( - self, - operation_id: str, - force: bool = False, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Cancels a user-induced fault operation. - - The following APIs start fault operations that may be cancelled by using CancelOperation: - StartDataLoss, StartQuorumLoss, StartPartitionRestart, StartNodeTransition. - - If force is false, then the specified user-induced operation will be gracefully stopped and - cleaned up. If force is true, the command will be aborted, and some internal state - may be left behind. Specifying force as true should be used with care. Calling this API with - force set to true is not allowed until this API has already - been called on the same test command with force set to false first, or unless the test command - already has an OperationState of OperationState.RollingBack. - Clarification: OperationState.RollingBack means that the system will be/is cleaning up internal - system state caused by executing the command. It will not restore data if the - test command was to cause data loss. For example, if you call StartDataLoss then call this - API, the system will only clean up internal state from running the command. - It will not restore the target partition's data, if the command progressed far enough to cause - data loss. - - Important note: if this API is invoked with force==true, internal state may be left behind. - - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param force: Indicates whether to gracefully roll back and clean up internal system state - modified by executing the user-induced operation. - :type force: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.cancel_operation.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['Force'] = self._serialize.query("force", force, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - cancel_operation.metadata = {'url': '/Faults/$/Cancel'} # type: ignore - - async def create_backup_policy( - self, - backup_policy_description: "_models.BackupPolicyDescription", - timeout: Optional[int] = 60, - 
validate_connection: Optional[bool] = False, - **kwargs - ) -> None: - """Creates a backup policy. - - Creates a backup policy which can be associated later with a Service Fabric application, - service or a partition for periodic backup. - - :param backup_policy_description: Describes the backup policy. - :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param validate_connection: Specifies whether to validate the storage connection and - credentials before creating or updating the backup policies. - :type validate_connection: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_backup_policy.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if validate_connection is not None: - query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, 
Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} # type: ignore - - async def delete_backup_policy( - self, - backup_policy_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes the backup policy. - - Deletes an existing backup policy. A backup policy must be created before it can be deleted. A - currently active backup policy, associated with any Service Fabric application, service or - partition, cannot be deleted without first deleting the mapping. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_backup_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} # type: ignore - - async def get_backup_policy_list( - self, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] 
= 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedBackupPolicyDescriptionList": - """Gets all the backup policies configured. - - Get a list of all the backup policies configured. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupPolicyDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupPolicyDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_backup_policy_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PagedBackupPolicyDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} # type: ignore - - async def get_backup_policy_by_name( - self, - backup_policy_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.BackupPolicyDescription": - """Gets a particular backup policy by name. - - Gets a particular backup policy identified by {backupPolicyName}. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BackupPolicyDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.BackupPolicyDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicyDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_backup_policy_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('BackupPolicyDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} # type: ignore - - async def get_all_entities_backed_up_by_policy( - self, - backup_policy_name: str, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedBackupEntityList": - """Gets the list of backup entities that are associated with this policy. - - Returns a list of Service Fabric application, service or partition which are associated with - this backup policy. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupEntityList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupEntityList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupEntityList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_all_entities_backed_up_by_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", 
continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupEntityList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} # type: ignore - - async def update_backup_policy( - self, - backup_policy_name: str, - backup_policy_description: "_models.BackupPolicyDescription", - timeout: Optional[int] = 60, - validate_connection: Optional[bool] = False, - **kwargs - ) -> None: - """Updates the backup policy. - - Updates the backup policy identified by {backupPolicyName}. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param backup_policy_description: Describes the backup policy. - :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param validate_connection: Specifies whether to validate the storage connection and - credentials before creating or updating the backup policies. - :type validate_connection: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_backup_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if validate_connection is not None: - query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = 
self._serialize.body(backup_policy_description, 'BackupPolicyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} # type: ignore - - async def enable_application_backup( - self, - application_id: str, - backup_policy_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Enables periodic backup of stateful partitions under this Service Fabric application. - - Enables periodic backup of stateful partitions which are part of this Service Fabric - application. Each partition is backed up individually as per the specified backup policy - description. - Note only C# based Reliable Actor and Reliable Stateful services are currently supported for - periodic backup. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.enable_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} # type: ignore - - async def disable_application_backup( - self, - application_id: str, - clean_backup: bool, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Disables periodic backup of Service Fabric application. - - Disables periodic backup of Service Fabric application which was previously enabled. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the - backups which were created for the backup entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.disable_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _disable_backup_description is not None: - body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} # type: ignore - - async def get_application_backup_configuration_info( - self, - application_id: str, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedBackupConfigurationInfoList": - """Gets the Service Fabric application backup configuration information. - - Gets the Service Fabric backup configuration information for the application and the services - and partitions under this application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. 
- This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupConfigurationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_backup_configuration_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - 
query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} # type: ignore - - async def get_application_backup_list( - self, - application_id: str, - timeout: Optional[int] = 60, - latest: Optional[bool] = False, - start_date_time_filter: Optional[datetime.datetime] = None, - end_date_time_filter: Optional[datetime.datetime] = None, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - **kwargs - ) -> "_models.PagedBackupInfoList": - """Gets the list of backups available for every partition in this application. - - Returns a list of backups available for every partition in this Service Fabric application. The - server enumerates all the backups available at the backup location configured in the backup - policy. 
It also allows filtering of the result based on start and end datetime or just fetching - the latest available backup for every partition. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup available for a partition - for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, all backups from the beginning are enumerated. - :type start_date_time_filter: ~datetime.datetime - :param end_date_time_filter: Specify the end date time till which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, enumeration is done till the most recent backup. - :type end_date_time_filter: ~datetime.datetime - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. 
If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_backup_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - 
query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} # type: ignore - - async def suspend_application_backup( - self, - application_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Suspends periodic backup for the specified Service Fabric application. - - The application which is configured to take periodic backups, is suspended for taking further - backups till it is resumed again. This operation applies to the entire application's hierarchy. 
- It means all the services and partitions under this application are now suspended for backup. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.suspend_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 
'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} # type: ignore - - async def resume_application_backup( - self, - application_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Resumes periodic backup of a Service Fabric application which was previously suspended. - - The previously suspended Service Fabric application resumes taking periodic backup as per the - backup policy currently configured for the same. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.resume_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_application_backup.metadata = {'url': '/Applications/{applicationId}/$/ResumeBackup'} # type: ignore - - async def enable_service_backup( - self, - service_id: str, - backup_policy_name: str, - timeout: Optional[int] = 60, - 
**kwargs - ) -> None: - """Enables periodic backup of stateful partitions under this Service Fabric service. - - Enables periodic backup of stateful partitions which are part of this Service Fabric service. - Each partition is backed up individually as per the specified backup policy description. In - case the application, which the service is part of, is already enabled for backup then this - operation would override the policy being used to take the periodic backup for this service and - its partitions (unless explicitly overridden at the partition level). - Note only C# based Reliable Actor and Reliable Stateful services are currently supported for - periodic backup. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.enable_service_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} # type: ignore - - async def disable_service_backup( - self, - service_id: str, - clean_backup: bool, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Disables periodic backup of Service Fabric service which was previously enabled. - - Disables periodic backup of Service Fabric service which was previously enabled. Backup must be - explicitly enabled before it can be disabled. - In case the backup is enabled for the Service Fabric application, which this service is part - of, this service would continue to be periodically backed up as per the policy mapped at the - application level. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the - backups which were created for the backup entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.disable_service_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _disable_backup_description is not None: - body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} # type: ignore - - async def get_service_backup_configuration_info( - self, - service_id: str, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedBackupConfigurationInfoList": - """Gets the Service Fabric service backup configuration information. - - Gets the Service Fabric backup configuration information for the service and the partitions - under this service. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. 
The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupConfigurationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_backup_configuration_info.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is 
    async def get_service_backup_list(
        self,
        service_id: str,
        timeout: Optional[int] = 60,
        latest: Optional[bool] = False,
        start_date_time_filter: Optional[datetime.datetime] = None,
        end_date_time_filter: Optional[datetime.datetime] = None,
        continuation_token_parameter: Optional[str] = None,
        max_results: Optional[int] = 0,
        **kwargs
    ) -> "_models.PagedBackupInfoList":
        """Gets the list of backups available for every partition in this service.

        Returns a list of backups available for every partition in this Service Fabric service. The
        server enumerates all the backups available in the backup store configured in the backup
        policy. It also allows filtering of the result based on start and end datetime or just fetching
        the latest available backup for every partition.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param latest: Specifies whether to get only the most recent backup available for a partition
         for the specified time range.
        :type latest: bool
        :param start_date_time_filter: Specify the start date time from which to enumerate backups, in
         datetime format. The date time must be specified in ISO8601 format. This is an optional
         parameter. If not specified, all backups from the beginning are enumerated.
        :type start_date_time_filter: ~datetime.datetime
        :param end_date_time_filter: Specify the end date time till which to enumerate backups, in
         datetime format. The date time must be specified in ISO8601 format. This is an optional
         parameter. If not specified, enumeration is done till the most recent backup.
        :type end_date_time_filter: ~datetime.datetime
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
        :type max_results: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedBackupInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedBackupInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated request/response plumbing -- code left verbatim,
        # comments only (hand edits are clobbered on regeneration).
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedBackupInfoList"]
        # Pre-map well-known auth/not-found/conflict statuses to typed azure.core exceptions;
        # callers can extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_service_backup_list.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: the '~'-delimited hierarchical service id goes into the path as-is.
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            # Server-side timeout in seconds; serializer enforces the range [1, 4294967295].
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        if latest is not None:
            query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool')
        if start_date_time_filter is not None:
            query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601')
        if end_date_time_filter is not None:
            query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601')
        if continuation_token_parameter is not None:
            # The continuation token is opaque and must not be URL-encoded (see docstring).
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort decode of the service's FabricError payload to enrich the exception.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedBackupInfoList', pipeline_response)

        if cls:
            # Caller-supplied hook also receives the raw pipeline response.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'}  # type: ignore
    async def suspend_service_backup(
        self,
        service_id: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Suspends periodic backup for the specified Service Fabric service.

        The service which is configured to take periodic backups, is suspended for taking further
        backups till it is resumed again. This operation applies to the entire service's hierarchy. It
        means all the partitions under this service are now suspended for backup.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.suspend_service_backup.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: '~'-delimited hierarchical service id is passed through unencoded.
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The service acknowledges the suspension with 202 Accepted; no response body expected.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'}  # type: ignore
    async def resume_service_backup(
        self,
        service_id: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Resumes periodic backup of a Service Fabric service which was previously suspended.

        The previously suspended Service Fabric service resumes taking periodic backup as per the
        backup policy currently configured for the same.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.resume_service_backup.metadata['url']  # type: ignore
        path_format_arguments = {
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status; anything else is surfaced as an error.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'}  # type: ignore
    async def enable_partition_backup(
        self,
        partition_id: str,
        backup_policy_name: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Enables periodic backup of the stateful persisted partition.

        Enables periodic backup of stateful persisted partition. Each partition is backed up as per the
        specified backup policy description. In case the application or service, which is partition is
        part of, is already enabled for backup then this operation would override the policy being used
        to take the periodic backup of this partition.
        Note only C# based Reliable Actor and Reliable Stateful services are currently supported for
        periodic backup.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups.
        :type backup_policy_name: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened body parameter: wrap the policy name in the generated request model.
        _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.enable_partition_backup.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status for this operation.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'}  # type: ignore
    async def disable_partition_backup(
        self,
        partition_id: str,
        clean_backup: bool,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Disables periodic backup of Service Fabric partition which was previously enabled.

        Disables periodic backup of partition which was previously enabled. Backup must be explicitly
        enabled before it can be disabled.
        In case the backup is enabled for the Service Fabric application or service, which this
        partition is part of, this partition would continue to be periodically backed up as per the
        policy mapped at the higher level entity.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the
         backups which were created for the backup entity that is getting disabled for backup.
        :type clean_backup: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened body parameter: wrap the clean_backup flag in the generated request model.
        _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.disable_partition_backup.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # Generated optional-body guard; the description is always constructed above, so the
        # else branch is effectively dead here but kept verbatim from the generator output.
        if _disable_backup_description is not None:
            body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    disable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/DisableBackup'}  # type: ignore
    async def get_partition_backup_configuration_info(
        self,
        partition_id: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> "_models.PartitionBackupConfigurationInfo":
        """Gets the partition backup configuration information.

        Gets the Service Fabric Backup configuration information for the specified partition.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PartitionBackupConfigurationInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PartitionBackupConfigurationInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_partition_backup_configuration_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Best-effort decode of the service's FabricError payload to enrich the exception.
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PartitionBackupConfigurationInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'}  # type: ignore
    async def get_partition_backup_list(
        self,
        partition_id: str,
        timeout: Optional[int] = 60,
        latest: Optional[bool] = False,
        start_date_time_filter: Optional[datetime.datetime] = None,
        end_date_time_filter: Optional[datetime.datetime] = None,
        **kwargs
    ) -> "_models.PagedBackupInfoList":
        """Gets the list of backups available for the specified partition.

        Returns a list of backups available for the specified partition. The server enumerates all the
        backups available in the backup store configured in the backup policy. It also allows filtering
        of the result based on start and end datetime or just fetching the latest available backup for
        the partition.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param latest: Specifies whether to get only the most recent backup available for a partition
         for the specified time range.
        :type latest: bool
        :param start_date_time_filter: Specify the start date time from which to enumerate backups, in
         datetime format. The date time must be specified in ISO8601 format. This is an optional
         parameter. If not specified, all backups from the beginning are enumerated.
        :type start_date_time_filter: ~datetime.datetime
        :param end_date_time_filter: Specify the end date time till which to enumerate backups, in
         datetime format. The date time must be specified in ISO8601 format. This is an optional
         parameter. If not specified, enumeration is done till the most recent backup.
        :type end_date_time_filter: ~datetime.datetime
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedBackupInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedBackupInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only. Unlike the
        # service-level enumeration, this partition-level variant exposes no paging
        # parameters (no ContinuationToken/MaxResults) -- that matches the REST contract
        # for this endpoint, not an omission here.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedBackupInfoList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.get_partition_backup_list.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        if latest is not None:
            query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool')
        if start_date_time_filter is not None:
            query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601')
        if end_date_time_filter is not None:
            query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedBackupInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'}  # type: ignore
    async def suspend_partition_backup(
        self,
        partition_id: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Suspends periodic backup for the specified partition.

        The partition which is configured to take periodic backups, is suspended for taking further
        backups till it is resumed again.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.suspend_partition_backup.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted acknowledges the suspension; no response body expected.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'}  # type: ignore
    async def resume_partition_backup(
        self,
        partition_id: str,
        timeout: Optional[int] = 60,
        **kwargs
    ) -> None:
        """Resumes periodic backup of partition which was previously suspended.

        The previously suspended partition resumes taking periodic backup as per the backup policy
        currently configured for the same.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"
        accept = "application/json"

        # Construct URL
        url = self.resume_partition_backup.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status for this operation.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'}  # type: ignore
    async def backup_partition(
        self,
        partition_id: str,
        backup_timeout: Optional[int] = 10,
        timeout: Optional[int] = 60,
        backup_storage: Optional["_models.BackupStorageDescription"] = None,
        **kwargs
    ) -> None:
        """Triggers backup of the partition's state.

        Creates a backup of the stateful persisted partition's state. In case the partition is already
        being periodically backed up, then by default the new backup is created at the same backup
        storage. One can also override the same by specifying the backup storage details as part of the
        request body. Once the backup is initiated, its progress can be tracked using the
        GetBackupProgress operation.
        In case, the operation times out, specify a greater backup timeout value in the query
        parameter.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param backup_timeout: Specifies the maximum amount of time, in minutes, to wait for the backup
         operation to complete. Post that, the operation completes with timeout error. However, in
         certain corner cases it could be that though the operation returns back timeout, the backup
         actually goes through. In case of timeout error, its recommended to invoke this operation again
         with a greater timeout value. The default value for the same is 10 minutes.
        :type backup_timeout: int
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param backup_storage: Specifies the details of the backup storage where to save the backup.
        :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated; code verbatim, comments only.
        # Beware the two distinct timeouts: 'backup_timeout' is in MINUTES (backup operation
        # budget), while 'timeout' is the usual per-request server timeout in SECONDS.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flattened body parameter: optional storage override wrapped in the request model.
        _backup_partition_description = _models.BackupPartitionDescription(backup_storage=backup_storage)
        api_version = "8.0"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.backup_partition.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if backup_timeout is not None:
            query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # Generated optional-body guard; the description is always constructed above (possibly
        # with backup_storage=None), so the else branch is effectively dead but kept verbatim.
        if _backup_partition_description is not None:
            body_content = self._serialize.body(_backup_partition_description, 'BackupPartitionDescription')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted: backup was initiated; track completion via get_partition_backup_progress.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'}  # type: ignore
body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'} # type: ignore - - async def get_partition_backup_progress( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.BackupProgressInfo": - """Gets details for the latest backup triggered for this partition. - - Returns information about the state of the latest backup along with details or failure reason - in case of completion. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BackupProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.BackupProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_backup_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('BackupProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_backup_progress.metadata = {'url': 
'/Partitions/{partitionId}/$/GetBackupProgress'} # type: ignore - - async def restore_partition( - self, - partition_id: str, - restore_partition_description: "_models.RestorePartitionDescription", - restore_timeout: Optional[int] = 10, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Triggers restore of the state of the partition using the specified restore partition description. - - Restores the state of a of the stateful persisted partition using the specified backup point. - In case the partition is already being periodically backed up, then by default the backup point - is looked for in the storage specified in backup policy. One can also override the same by - specifying the backup storage details as part of the restore partition description in body. - Once the restore is initiated, its progress can be tracked using the GetRestoreProgress - operation. - In case, the operation times out, specify a greater restore timeout value in the query - parameter. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param restore_partition_description: Describes the parameters to restore the partition. - :type restore_partition_description: ~azure.servicefabric.models.RestorePartitionDescription - :param restore_timeout: Specifies the maximum amount of time to wait, in minutes, for the - restore operation to complete. Post that, the operation returns back with timeout error. - However, in certain corner cases it could be that the restore operation goes through even - though it completes with timeout. In case of timeout error, its recommended to invoke this - operation again with a greater timeout value. the default value for the same is 10 minutes. - :type restore_timeout: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.restore_partition.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if restore_timeout is not None: - query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'} # type: ignore - - async def get_partition_restore_progress( - self, - partition_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.RestoreProgressInfo": - """Gets details for the latest restore operation triggered for this partition. - - Returns information about the state of the latest restore operation along with details or - failure reason in case of completion. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RestoreProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RestoreProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RestoreProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_restore_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RestoreProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_restore_progress.metadata = 
{'url': '/Partitions/{partitionId}/$/GetRestoreProgress'} # type: ignore - - async def get_backups_from_backup_location( - self, - get_backup_by_storage_query_description: "_models.GetBackupByStorageQueryDescription", - timeout: Optional[int] = 60, - continuation_token_parameter: Optional[str] = None, - max_results: Optional[int] = 0, - **kwargs - ) -> "_models.PagedBackupInfoList": - """Gets the list of backups available for the specified backed up entity at the specified backup location. - - Gets the list of backups available for the specified backed up entity (Application, Service or - Partition) at the specified backup location (FileShare or Azure Blob Storage). - - :param get_backup_by_storage_query_description: Describes the filters and backup storage - details to be used for enumerating backups. - :type get_backup_by_storage_query_description: ~azure.servicefabric.models.GetBackupByStorageQueryDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. 
The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_backups_from_backup_location.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - 
body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'} # type: ignore - - async def create_name( - self, - name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Creates a Service Fabric name. - - Creates the specified Service Fabric name. - - :param name: The Service Fabric name, including the 'fabric:' URI scheme. - :type name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _name_description = _models.NameDescription(name=name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_name.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_name_description, 'NameDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_name.metadata 
= {'url': '/Names/$/Create'} # type: ignore - - async def get_name_exists_info( - self, - name_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Returns whether the Service Fabric name exists. - - Returns whether the specified Service Fabric name exists. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_name_exists_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - get_name_exists_info.metadata = {'url': '/Names/{nameId}'} # type: ignore - - async def delete_name( - self, - name_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes a Service Fabric name. - - Deletes the specified Service Fabric name. A name must be created before it can be deleted. - Deleting a name with child properties will fail. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_name.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_name.metadata = {'url': '/Names/{nameId}'} # type: ignore - - async def get_sub_name_info_list( - self, - name_id: str, - recursive: Optional[bool] = False, - continuation_token_parameter: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> 
"_models.PagedSubNameInfoList": - """Enumerates all the Service Fabric names under a given name. - - Enumerates all the Service Fabric names under a given name. If the subnames do not fit in a - page, one page of results is returned as well as a continuation token, which can be used to get - the next page. Querying a name that doesn't exist will fail. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param recursive: Allows specifying that the search performed should be recursive. - :type recursive: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedSubNameInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedSubNameInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSubNameInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_sub_name_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if recursive is not None: - query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedSubNameInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'} # type: ignore - - async def get_property_info_list( - self, - name_id: str, - include_values: Optional[bool] = False, - continuation_token_parameter: Optional[str] = None, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PagedPropertyInfoList": - """Gets information on all Service Fabric properties under a given name. - - A Service Fabric name can have one or more named properties that store custom information. This - operation gets the information about these properties in a paged list. The information includes - name, value, and metadata about each of the properties. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param include_values: Allows specifying whether to include the values of the properties - returned. True if values should be returned with the metadata; False to return only property - metadata. - :type include_values: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedPropertyInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedPropertyInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedPropertyInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_property_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if include_values is not None: - query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedPropertyInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'} # type: ignore - - async def put_property( - self, - name_id: str, - property_description: "_models.PropertyDescription", - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Creates or updates a Service Fabric property. - - Creates or updates the specified Service Fabric property under a given name. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param property_description: Describes the Service Fabric property to be created. - :type property_description: ~azure.servicefabric.models.PropertyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.put_property.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(property_description, 'PropertyDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore - - async def get_property_info( - self, - name_id: str, - property_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> "_models.PropertyInfo": - """Gets the specified Service Fabric property. - - Gets the specified Service Fabric property under a given name. This will always return both - value and metadata. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PropertyInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PropertyInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PropertyInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_property_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = 
self._serialize.query("property_name", property_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PropertyInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore - - async def delete_property( - self, - name_id: str, - property_name: str, - timeout: Optional[int] = 60, - **kwargs - ) -> None: - """Deletes the specified Service Fabric property. - - Deletes the specified Service Fabric property under a given name. A property must be created - before it can be deleted. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_property.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore - - async def submit_property_batch( - self, - name_id: str, - timeout: 
Optional[int] = 60, - operations: Optional[List["_models.PropertyBatchOperation"]] = None, - **kwargs - ) -> Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]: - """Submits a property batch. - - Submits a batch of property operations. Either all or none of the operations will be committed. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param operations: A list of the property batch operations to be executed. - :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SuccessfulPropertyBatchInfo or FailedPropertyBatchInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SuccessfulPropertyBatchInfo or ~azure.servicefabric.models.FailedPropertyBatchInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _property_batch_description_list = _models.PropertyBatchDescriptionList(operations=operations) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.submit_property_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters 
- query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_property_batch_description_list, 'PropertyBatchDescriptionList') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 409]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if response.status_code == 200: - deserialized = self._deserialize('SuccessfulPropertyBatchInfo', pipeline_response) - - if response.status_code == 409: - deserialized = self._deserialize('FailedPropertyBatchInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'} # type: ignore - - async def get_cluster_event_list( - self, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ClusterEvent"]: - """Gets all 
Cluster-related events. - - The response is list of ClusterEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ClusterEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ClusterEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ClusterEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ClusterEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_event_list.metadata = {'url': '/EventsStore/Cluster/Events'} # type: ignore - - async def get_containers_event_list( - self, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ContainerInstanceEvent"]: - """Gets all Containers-related events. - - The response is list of ContainerInstanceEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. 
otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ContainerInstanceEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ContainerInstanceEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_containers_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ContainerInstanceEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'} # type: ignore - - async def get_node_event_list( - self, - node_name: str, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.NodeEvent"]: - """Gets a Node-related events. - - The response is list of NodeEvent objects. - - :param node_name: The name of the node. - :type node_name: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. 
- :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of NodeEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.NodeEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[NodeEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'} # type: ignore - - async def get_nodes_event_list( - self, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.NodeEvent"]: - """Gets all Nodes-related Events. - - The response is list of NodeEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of NodeEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.NodeEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_nodes_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = 
self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[NodeEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_nodes_event_list.metadata = {'url': '/EventsStore/Nodes/Events'} # type: ignore - - async def get_application_event_list( - self, - application_id: str, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ApplicationEvent"]: - """Gets an Application-related events. - - The response is list of ApplicationEvent objects. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
- :type application_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ApplicationEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ApplicationEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'} # type: ignore - - async def get_applications_event_list( - self, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ApplicationEvent"]: - """Gets all Applications-related events. - - The response is list of ApplicationEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. 
- :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ApplicationEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ApplicationEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_applications_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'} # type: ignore - - async def get_service_event_list( - self, - service_id: str, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ServiceEvent"]: - """Gets a Service-related events. - - The response is list of ServiceEvent objects. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
- :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ServiceEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ServiceEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') 
- if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ServiceEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'} # type: ignore - - async def get_services_event_list( - self, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> 
List["_models.ServiceEvent"]: - """Gets all Services-related events. - - The response is list of ServiceEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ServiceEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ServiceEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_services_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ServiceEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} # type: ignore - - async def get_partition_event_list( - self, - partition_id: str, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.PartitionEvent"]: - """Gets a Partition-related events. - - The response is list of PartitionEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. 
- :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of PartitionEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.PartitionEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is 
not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[PartitionEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} # type: ignore - - async def get_partitions_event_list( - self, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.PartitionEvent"]: - """Gets all Partitions-related events. - - The response is list of PartitionEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of PartitionEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.PartitionEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partitions_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - 
query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[PartitionEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} # type: ignore - - async def get_partition_replica_event_list( - self, - partition_id: str, - replica_id: str, - start_time_utc: str, - end_time_utc: str, - timeout: Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ReplicaEvent"]: - """Gets a Partition Replica-related events. - - The response is list of ReplicaEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
- :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ReplicaEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ReplicaEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_replica_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: 
Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} # type: ignore - - async def get_partition_replicas_event_list( - self, - partition_id: str, - start_time_utc: str, - end_time_utc: str, - timeout: 
Optional[int] = 60, - events_types_filter: Optional[str] = None, - exclude_analysis_events: Optional[bool] = None, - skip_correlation_lookup: Optional[bool] = None, - **kwargs - ) -> List["_models.ReplicaEvent"]: - """Gets all Replicas-related events for a Partition. - - The response is list of ReplicaEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ReplicaEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ReplicaEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_replicas_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} # type: ignore - - async def get_correlated_event_list( - self, - event_instance_id: str, - timeout: Optional[int] = 60, - **kwargs - ) -> List["_models.FabricEvent"]: - """Gets all correlated events for a given event. - - The response is list of FabricEvents. - - :param event_instance_id: The EventInstanceId. - :type event_instance_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of FabricEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.FabricEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_correlated_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[FabricEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_correlated_event_list.metadata = {'url': 
'/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py index f14afc288e10..d0d38db785a9 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/__init__.py @@ -1,9 +1,12 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
# -------------------------------------------------------------------------- try: @@ -44,6 +47,7 @@ from ._models_py3 import ApplicationScopedVolume from ._models_py3 import ApplicationScopedVolumeCreationParameters from ._models_py3 import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk + from ._models_py3 import ApplicationsHealthEvaluation from ._models_py3 import ApplicationTypeApplicationsHealthEvaluation from ._models_py3 import ApplicationTypeHealthPolicyMapItem from ._models_py3 import ApplicationTypeImageStorePath @@ -57,7 +61,6 @@ from ._models_py3 import ApplicationUpgradeRollbackStartedEvent from ._models_py3 import ApplicationUpgradeStartedEvent from ._models_py3 import ApplicationUpgradeUpdateDescription - from ._models_py3 import ApplicationsHealthEvaluation from ._models_py3 import AutoScalingMechanism from ._models_py3 import AutoScalingMetric from ._models_py3 import AutoScalingPolicy @@ -83,8 +86,8 @@ from ._models_py3 import ChaosCodePackageRestartScheduledEvent from ._models_py3 import ChaosContext from ._models_py3 import ChaosEvent - from ._models_py3 import ChaosEventWrapper from ._models_py3 import ChaosEventsSegment + from ._models_py3 import ChaosEventWrapper from ._models_py3 import ChaosNodeRestartScheduledEvent from ._models_py3 import ChaosParameters from ._models_py3 import ChaosParametersDictionaryItem @@ -146,7 +149,6 @@ from ._models_py3 import DefaultExecutionPolicy from ._models_py3 import DeletePropertyBatchOperation from ._models_py3 import DeltaNodesCheckHealthEvaluation - from ._models_py3 import DeployServicePackageToNodeDescription from ._models_py3 import DeployedApplicationHealth from ._models_py3 import DeployedApplicationHealthEvaluation from ._models_py3 import DeployedApplicationHealthReportExpiredEvent @@ -175,6 +177,7 @@ from ._models_py3 import DeployedStatefulServiceReplicaInfo from ._models_py3 import DeployedStatelessServiceInstanceDetailInfo from ._models_py3 import 
DeployedStatelessServiceInstanceInfo + from ._models_py3 import DeployServicePackageToNodeDescription from ._models_py3 import DiagnosticsDescription from ._models_py3 import DiagnosticsRef from ._models_py3 import DiagnosticsSinkProperties @@ -200,7 +203,7 @@ from ._models_py3 import ExternalStoreProvisionApplicationTypeDescription from ._models_py3 import FabricCodeVersionInfo from ._models_py3 import FabricConfigVersionInfo - from ._models_py3 import FabricError + from ._models_py3 import FabricError, FabricErrorException from ._models_py3 import FabricErrorError from ._models_py3 import FabricEvent from ._models_py3 import FailedPropertyBatchInfo @@ -242,12 +245,12 @@ from ._models_py3 import InvokeDataLossResult from ._models_py3 import InvokeQuorumLossResult from ._models_py3 import KeyValueStoreReplicaStatus - from ._models_py3 import LoadMetricInformation - from ._models_py3 import LoadMetricReport - from ._models_py3 import LoadMetricReportInfo from ._models_py3 import LoadedPartitionInformationQueryDescription from ._models_py3 import LoadedPartitionInformationResult from ._models_py3 import LoadedPartitionInformationResultList + from ._models_py3 import LoadMetricInformation + from ._models_py3 import LoadMetricReport + from ._models_py3 import LoadMetricReportInfo from ._models_py3 import LocalNetworkResourceProperties from ._models_py3 import ManagedApplicationIdentity from ._models_py3 import ManagedApplicationIdentityDescription @@ -290,6 +293,7 @@ from ._models_py3 import NodeRepairImpactDescription from ._models_py3 import NodeRepairTargetDescription from ._models_py3 import NodeResult + from ._models_py3 import NodesHealthEvaluation from ._models_py3 import NodeTagsDescription from ._models_py3 import NodeTransitionProgress from ._models_py3 import NodeTransitionResult @@ -297,7 +301,6 @@ from ._models_py3 import NodeTypeNodesHealthEvaluation from ._models_py3 import NodeUpEvent from ._models_py3 import NodeUpgradeProgressInfo - from ._models_py3 
import NodesHealthEvaluation from ._models_py3 import OperationStatus from ._models_py3 import PackageSharingPolicyInfo from ._models_py3 import PagedApplicationInfoList @@ -389,8 +392,8 @@ from ._models_py3 import ReplicaInfo from ._models_py3 import ReplicaLifecycleDescription from ._models_py3 import ReplicaMetricLoadDescription - from ._models_py3 import ReplicaStatusBase from ._models_py3 import ReplicasHealthEvaluation + from ._models_py3 import ReplicaStatusBase from ._models_py3 import ReplicatorQueueStatus from ._models_py3 import ReplicatorStatus from ._models_py3 import ResolvedServiceEndpoint @@ -422,7 +425,6 @@ from ._models_py3 import SecretValue from ._models_py3 import SecretValueProperties from ._models_py3 import SecretValueResourceDescription - from ._models_py3 import SecretValueResourceProperties from ._models_py3 import SeedNodeSafetyCheck from ._models_py3 import SelectedPartition from ._models_py3 import ServiceBackupConfigurationInfo @@ -451,13 +453,13 @@ from ._models_py3 import ServicePlacementNonPartiallyPlaceServicePolicyDescription from ._models_py3 import ServicePlacementPolicyDescription from ._models_py3 import ServicePlacementPreferPrimaryDomainPolicyDescription - from ._models_py3 import ServicePlacementRequireDomainDistributionPolicyDescription from ._models_py3 import ServicePlacementRequiredDomainPolicyDescription + from ._models_py3 import ServicePlacementRequireDomainDistributionPolicyDescription from ._models_py3 import ServiceProperties from ._models_py3 import ServiceReplicaDescription from ._models_py3 import ServiceReplicaProperties from ._models_py3 import ServiceResourceDescription - from ._models_py3 import ServiceResourceProperties + from ._models_py3 import ServicesHealthEvaluation from ._models_py3 import ServiceTypeDescription from ._models_py3 import ServiceTypeExtensionDescription from ._models_py3 import ServiceTypeHealthPolicy @@ -466,7 +468,6 @@ from ._models_py3 import ServiceTypeManifest from ._models_py3 
import ServiceUpdateDescription from ._models_py3 import ServiceUpgradeProgress - from ._models_py3 import ServicesHealthEvaluation from ._models_py3 import Setting from ._models_py3 import SingletonPartitionInformation from ._models_py3 import SingletonPartitionSchemeDescription @@ -526,526 +527,523 @@ from ._models_py3 import WaitForReconfigurationSafetyCheck from ._models_py3 import WaitingChaosEvent except (SyntaxError, ImportError): - from ._models import AadMetadata # type: ignore - from ._models import AadMetadataObject # type: ignore - from ._models import AddRemoveIncrementalNamedPartitionScalingMechanism # type: ignore - from ._models import AddRemoveReplicaScalingMechanism # type: ignore - from ._models import AnalysisEventMetadata # type: ignore - from ._models import ApplicationBackupConfigurationInfo # type: ignore - from ._models import ApplicationBackupEntity # type: ignore - from ._models import ApplicationCapacityDescription # type: ignore - from ._models import ApplicationContainerInstanceExitedEvent # type: ignore - from ._models import ApplicationCreatedEvent # type: ignore - from ._models import ApplicationDeletedEvent # type: ignore - from ._models import ApplicationDescription # type: ignore - from ._models import ApplicationEvent # type: ignore - from ._models import ApplicationHealth # type: ignore - from ._models import ApplicationHealthEvaluation # type: ignore - from ._models import ApplicationHealthPolicies # type: ignore - from ._models import ApplicationHealthPolicy # type: ignore - from ._models import ApplicationHealthPolicyMapItem # type: ignore - from ._models import ApplicationHealthPolicyMapObject # type: ignore - from ._models import ApplicationHealthReportExpiredEvent # type: ignore - from ._models import ApplicationHealthState # type: ignore - from ._models import ApplicationHealthStateChunk # type: ignore - from ._models import ApplicationHealthStateChunkList # type: ignore - from ._models import 
ApplicationHealthStateFilter # type: ignore - from ._models import ApplicationInfo # type: ignore - from ._models import ApplicationLoadInfo # type: ignore - from ._models import ApplicationLoadMetricInformation # type: ignore - from ._models import ApplicationMetricDescription # type: ignore - from ._models import ApplicationNameInfo # type: ignore - from ._models import ApplicationNewHealthReportEvent # type: ignore - from ._models import ApplicationParameter # type: ignore - from ._models import ApplicationProcessExitedEvent # type: ignore - from ._models import ApplicationResourceDescription # type: ignore - from ._models import ApplicationResourceUpgradeProgressInfo # type: ignore - from ._models import ApplicationScopedVolume # type: ignore - from ._models import ApplicationScopedVolumeCreationParameters # type: ignore - from ._models import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk # type: ignore - from ._models import ApplicationTypeApplicationsHealthEvaluation # type: ignore - from ._models import ApplicationTypeHealthPolicyMapItem # type: ignore - from ._models import ApplicationTypeImageStorePath # type: ignore - from ._models import ApplicationTypeInfo # type: ignore - from ._models import ApplicationTypeManifest # type: ignore - from ._models import ApplicationUpgradeCompletedEvent # type: ignore - from ._models import ApplicationUpgradeDescription # type: ignore - from ._models import ApplicationUpgradeDomainCompletedEvent # type: ignore - from ._models import ApplicationUpgradeProgressInfo # type: ignore - from ._models import ApplicationUpgradeRollbackCompletedEvent # type: ignore - from ._models import ApplicationUpgradeRollbackStartedEvent # type: ignore - from ._models import ApplicationUpgradeStartedEvent # type: ignore - from ._models import ApplicationUpgradeUpdateDescription # type: ignore - from ._models import ApplicationsHealthEvaluation # type: ignore - from ._models import AutoScalingMechanism # type: ignore - from 
._models import AutoScalingMetric # type: ignore - from ._models import AutoScalingPolicy # type: ignore - from ._models import AutoScalingResourceMetric # type: ignore - from ._models import AutoScalingTrigger # type: ignore - from ._models import AverageLoadScalingTrigger # type: ignore - from ._models import AveragePartitionLoadScalingTrigger # type: ignore - from ._models import AverageServiceLoadScalingTrigger # type: ignore - from ._models import AzureBlobBackupStorageDescription # type: ignore - from ._models import AzureInternalMonitoringPipelineSinkDescription # type: ignore - from ._models import BackupConfigurationInfo # type: ignore - from ._models import BackupEntity # type: ignore - from ._models import BackupInfo # type: ignore - from ._models import BackupPartitionDescription # type: ignore - from ._models import BackupPolicyDescription # type: ignore - from ._models import BackupProgressInfo # type: ignore - from ._models import BackupScheduleDescription # type: ignore - from ._models import BackupStorageDescription # type: ignore - from ._models import BackupSuspensionInfo # type: ignore - from ._models import BasicRetentionPolicyDescription # type: ignore - from ._models import BinaryPropertyValue # type: ignore - from ._models import Chaos # type: ignore - from ._models import ChaosCodePackageRestartScheduledEvent # type: ignore - from ._models import ChaosContext # type: ignore - from ._models import ChaosEvent # type: ignore - from ._models import ChaosEventWrapper # type: ignore - from ._models import ChaosEventsSegment # type: ignore - from ._models import ChaosNodeRestartScheduledEvent # type: ignore - from ._models import ChaosParameters # type: ignore - from ._models import ChaosParametersDictionaryItem # type: ignore - from ._models import ChaosPartitionPrimaryMoveScheduledEvent # type: ignore - from ._models import ChaosPartitionSecondaryMoveScheduledEvent # type: ignore - from ._models import ChaosReplicaRemovalScheduledEvent # type: 
ignore - from ._models import ChaosReplicaRestartScheduledEvent # type: ignore - from ._models import ChaosSchedule # type: ignore - from ._models import ChaosScheduleDescription # type: ignore - from ._models import ChaosScheduleJob # type: ignore - from ._models import ChaosScheduleJobActiveDaysOfWeek # type: ignore - from ._models import ChaosStartedEvent # type: ignore - from ._models import ChaosStoppedEvent # type: ignore - from ._models import ChaosTargetFilter # type: ignore - from ._models import CheckExistsPropertyBatchOperation # type: ignore - from ._models import CheckSequencePropertyBatchOperation # type: ignore - from ._models import CheckValuePropertyBatchOperation # type: ignore - from ._models import ClusterConfiguration # type: ignore - from ._models import ClusterConfigurationUpgradeDescription # type: ignore - from ._models import ClusterConfigurationUpgradeStatusInfo # type: ignore - from ._models import ClusterEvent # type: ignore - from ._models import ClusterHealth # type: ignore - from ._models import ClusterHealthChunk # type: ignore - from ._models import ClusterHealthChunkQueryDescription # type: ignore - from ._models import ClusterHealthPolicies # type: ignore - from ._models import ClusterHealthPolicy # type: ignore - from ._models import ClusterHealthReportExpiredEvent # type: ignore - from ._models import ClusterLoadInfo # type: ignore - from ._models import ClusterManifest # type: ignore - from ._models import ClusterNewHealthReportEvent # type: ignore - from ._models import ClusterUpgradeCompletedEvent # type: ignore - from ._models import ClusterUpgradeDescriptionObject # type: ignore - from ._models import ClusterUpgradeDomainCompletedEvent # type: ignore - from ._models import ClusterUpgradeHealthPolicyObject # type: ignore - from ._models import ClusterUpgradeProgressObject # type: ignore - from ._models import ClusterUpgradeRollbackCompletedEvent # type: ignore - from ._models import ClusterUpgradeRollbackStartedEvent # 
type: ignore - from ._models import ClusterUpgradeStartedEvent # type: ignore - from ._models import ClusterVersion # type: ignore - from ._models import CodePackageEntryPoint # type: ignore - from ._models import CodePackageEntryPointStatistics # type: ignore - from ._models import ComposeDeploymentStatusInfo # type: ignore - from ._models import ComposeDeploymentUpgradeDescription # type: ignore - from ._models import ComposeDeploymentUpgradeProgressInfo # type: ignore - from ._models import ConfigParameterOverride # type: ignore - from ._models import ContainerApiRequestBody # type: ignore - from ._models import ContainerApiResponse # type: ignore - from ._models import ContainerApiResult # type: ignore - from ._models import ContainerCodePackageProperties # type: ignore - from ._models import ContainerEvent # type: ignore - from ._models import ContainerInstanceEvent # type: ignore - from ._models import ContainerInstanceView # type: ignore - from ._models import ContainerLabel # type: ignore - from ._models import ContainerLogs # type: ignore - from ._models import ContainerState # type: ignore - from ._models import CreateComposeDeploymentDescription # type: ignore - from ._models import CurrentUpgradeDomainProgressInfo # type: ignore - from ._models import DeactivationIntentDescription # type: ignore - from ._models import DefaultExecutionPolicy # type: ignore - from ._models import DeletePropertyBatchOperation # type: ignore - from ._models import DeltaNodesCheckHealthEvaluation # type: ignore - from ._models import DeployServicePackageToNodeDescription # type: ignore - from ._models import DeployedApplicationHealth # type: ignore - from ._models import DeployedApplicationHealthEvaluation # type: ignore - from ._models import DeployedApplicationHealthReportExpiredEvent # type: ignore - from ._models import DeployedApplicationHealthState # type: ignore - from ._models import DeployedApplicationHealthStateChunk # type: ignore - from ._models import 
DeployedApplicationHealthStateChunkList # type: ignore - from ._models import DeployedApplicationHealthStateFilter # type: ignore - from ._models import DeployedApplicationInfo # type: ignore - from ._models import DeployedApplicationNewHealthReportEvent # type: ignore - from ._models import DeployedApplicationsHealthEvaluation # type: ignore - from ._models import DeployedCodePackageInfo # type: ignore - from ._models import DeployedServicePackageHealth # type: ignore - from ._models import DeployedServicePackageHealthEvaluation # type: ignore - from ._models import DeployedServicePackageHealthReportExpiredEvent # type: ignore - from ._models import DeployedServicePackageHealthState # type: ignore - from ._models import DeployedServicePackageHealthStateChunk # type: ignore - from ._models import DeployedServicePackageHealthStateChunkList # type: ignore - from ._models import DeployedServicePackageHealthStateFilter # type: ignore - from ._models import DeployedServicePackageInfo # type: ignore - from ._models import DeployedServicePackageNewHealthReportEvent # type: ignore - from ._models import DeployedServicePackagesHealthEvaluation # type: ignore - from ._models import DeployedServiceReplicaDetailInfo # type: ignore - from ._models import DeployedServiceReplicaInfo # type: ignore - from ._models import DeployedServiceTypeInfo # type: ignore - from ._models import DeployedStatefulServiceReplicaDetailInfo # type: ignore - from ._models import DeployedStatefulServiceReplicaInfo # type: ignore - from ._models import DeployedStatelessServiceInstanceDetailInfo # type: ignore - from ._models import DeployedStatelessServiceInstanceInfo # type: ignore - from ._models import DiagnosticsDescription # type: ignore - from ._models import DiagnosticsRef # type: ignore - from ._models import DiagnosticsSinkProperties # type: ignore - from ._models import DisableBackupDescription # type: ignore - from ._models import DiskInfo # type: ignore - from ._models import 
DoublePropertyValue # type: ignore - from ._models import DsmsAzureBlobBackupStorageDescription # type: ignore - from ._models import EnableBackupDescription # type: ignore - from ._models import EndpointProperties # type: ignore - from ._models import EndpointRef # type: ignore - from ._models import EnsureAvailabilitySafetyCheck # type: ignore - from ._models import EnsurePartitionQuorumSafetyCheck # type: ignore - from ._models import EntityHealth # type: ignore - from ._models import EntityHealthState # type: ignore - from ._models import EntityHealthStateChunk # type: ignore - from ._models import EntityHealthStateChunkList # type: ignore - from ._models import EntityKindHealthStateCount # type: ignore - from ._models import EnvironmentVariable # type: ignore - from ._models import Epoch # type: ignore - from ._models import EventHealthEvaluation # type: ignore - from ._models import ExecutingFaultsChaosEvent # type: ignore - from ._models import ExecutionPolicy # type: ignore - from ._models import ExternalStoreProvisionApplicationTypeDescription # type: ignore - from ._models import FabricCodeVersionInfo # type: ignore - from ._models import FabricConfigVersionInfo # type: ignore - from ._models import FabricError # type: ignore - from ._models import FabricErrorError # type: ignore - from ._models import FabricEvent # type: ignore - from ._models import FailedPropertyBatchInfo # type: ignore - from ._models import FailedUpgradeDomainProgressObject # type: ignore - from ._models import FailureUpgradeDomainProgressInfo # type: ignore - from ._models import FileInfo # type: ignore - from ._models import FileShareBackupStorageDescription # type: ignore - from ._models import FileVersion # type: ignore - from ._models import FolderInfo # type: ignore - from ._models import FolderSizeInfo # type: ignore - from ._models import FrequencyBasedBackupScheduleDescription # type: ignore - from ._models import GatewayDestination # type: ignore - from ._models import 
GatewayResourceDescription # type: ignore - from ._models import GetBackupByStorageQueryDescription # type: ignore - from ._models import GetPropertyBatchOperation # type: ignore - from ._models import GuidPropertyValue # type: ignore - from ._models import HealthEvaluation # type: ignore - from ._models import HealthEvaluationWrapper # type: ignore - from ._models import HealthEvent # type: ignore - from ._models import HealthInformation # type: ignore - from ._models import HealthStateCount # type: ignore - from ._models import HealthStatistics # type: ignore - from ._models import HttpConfig # type: ignore - from ._models import HttpHostConfig # type: ignore - from ._models import HttpRouteConfig # type: ignore - from ._models import HttpRouteMatchHeader # type: ignore - from ._models import HttpRouteMatchPath # type: ignore - from ._models import HttpRouteMatchRule # type: ignore - from ._models import IdentityDescription # type: ignore - from ._models import IdentityItemDescription # type: ignore - from ._models import ImageRegistryCredential # type: ignore - from ._models import ImageStoreContent # type: ignore - from ._models import ImageStoreCopyDescription # type: ignore - from ._models import ImageStoreInfo # type: ignore - from ._models import InlinedValueSecretResourceProperties # type: ignore - from ._models import InstanceLifecycleDescription # type: ignore - from ._models import Int64PropertyValue # type: ignore - from ._models import Int64RangePartitionInformation # type: ignore - from ._models import InvokeDataLossResult # type: ignore - from ._models import InvokeQuorumLossResult # type: ignore - from ._models import KeyValueStoreReplicaStatus # type: ignore - from ._models import LoadMetricInformation # type: ignore - from ._models import LoadMetricReport # type: ignore - from ._models import LoadMetricReportInfo # type: ignore - from ._models import LoadedPartitionInformationQueryDescription # type: ignore - from ._models import 
LoadedPartitionInformationResult # type: ignore - from ._models import LoadedPartitionInformationResultList # type: ignore - from ._models import LocalNetworkResourceProperties # type: ignore - from ._models import ManagedApplicationIdentity # type: ignore - from ._models import ManagedApplicationIdentityDescription # type: ignore - from ._models import ManagedIdentityAzureBlobBackupStorageDescription # type: ignore - from ._models import MetricLoadDescription # type: ignore - from ._models import MonitoringPolicyDescription # type: ignore - from ._models import NameDescription # type: ignore - from ._models import NamedPartitionInformation # type: ignore - from ._models import NamedPartitionSchemeDescription # type: ignore - from ._models import NetworkRef # type: ignore - from ._models import NetworkResourceDescription # type: ignore - from ._models import NetworkResourceProperties # type: ignore - from ._models import NetworkResourcePropertiesBase # type: ignore - from ._models import NodeAbortedEvent # type: ignore - from ._models import NodeAddedToClusterEvent # type: ignore - from ._models import NodeClosedEvent # type: ignore - from ._models import NodeDeactivateCompletedEvent # type: ignore - from ._models import NodeDeactivateStartedEvent # type: ignore - from ._models import NodeDeactivationInfo # type: ignore - from ._models import NodeDeactivationTask # type: ignore - from ._models import NodeDeactivationTaskId # type: ignore - from ._models import NodeDownEvent # type: ignore - from ._models import NodeEvent # type: ignore - from ._models import NodeHealth # type: ignore - from ._models import NodeHealthEvaluation # type: ignore - from ._models import NodeHealthReportExpiredEvent # type: ignore - from ._models import NodeHealthState # type: ignore - from ._models import NodeHealthStateChunk # type: ignore - from ._models import NodeHealthStateChunkList # type: ignore - from ._models import NodeHealthStateFilter # type: ignore - from ._models import 
NodeId # type: ignore - from ._models import NodeImpact # type: ignore - from ._models import NodeInfo # type: ignore - from ._models import NodeLoadInfo # type: ignore - from ._models import NodeLoadMetricInformation # type: ignore - from ._models import NodeNewHealthReportEvent # type: ignore - from ._models import NodeOpenFailedEvent # type: ignore - from ._models import NodeOpenSucceededEvent # type: ignore - from ._models import NodeRemovedFromClusterEvent # type: ignore - from ._models import NodeRepairImpactDescription # type: ignore - from ._models import NodeRepairTargetDescription # type: ignore - from ._models import NodeResult # type: ignore - from ._models import NodeTagsDescription # type: ignore - from ._models import NodeTransitionProgress # type: ignore - from ._models import NodeTransitionResult # type: ignore - from ._models import NodeTypeHealthPolicyMapItem # type: ignore - from ._models import NodeTypeNodesHealthEvaluation # type: ignore - from ._models import NodeUpEvent # type: ignore - from ._models import NodeUpgradeProgressInfo # type: ignore - from ._models import NodesHealthEvaluation # type: ignore - from ._models import OperationStatus # type: ignore - from ._models import PackageSharingPolicyInfo # type: ignore - from ._models import PagedApplicationInfoList # type: ignore - from ._models import PagedApplicationResourceDescriptionList # type: ignore - from ._models import PagedApplicationTypeInfoList # type: ignore - from ._models import PagedBackupConfigurationInfoList # type: ignore - from ._models import PagedBackupEntityList # type: ignore - from ._models import PagedBackupInfoList # type: ignore - from ._models import PagedBackupPolicyDescriptionList # type: ignore - from ._models import PagedComposeDeploymentStatusInfoList # type: ignore - from ._models import PagedDeployedApplicationInfoList # type: ignore - from ._models import PagedGatewayResourceDescriptionList # type: ignore - from ._models import 
PagedNetworkResourceDescriptionList # type: ignore - from ._models import PagedNodeInfoList # type: ignore - from ._models import PagedPropertyInfoList # type: ignore - from ._models import PagedReplicaInfoList # type: ignore - from ._models import PagedSecretResourceDescriptionList # type: ignore - from ._models import PagedSecretValueResourceDescriptionList # type: ignore - from ._models import PagedServiceInfoList # type: ignore - from ._models import PagedServicePartitionInfoList # type: ignore - from ._models import PagedServiceReplicaDescriptionList # type: ignore - from ._models import PagedServiceResourceDescriptionList # type: ignore - from ._models import PagedSubNameInfoList # type: ignore - from ._models import PagedUpdatePartitionLoadResultList # type: ignore - from ._models import PagedVolumeResourceDescriptionList # type: ignore - from ._models import PartitionAnalysisEvent # type: ignore - from ._models import PartitionBackupConfigurationInfo # type: ignore - from ._models import PartitionBackupEntity # type: ignore - from ._models import PartitionDataLossProgress # type: ignore - from ._models import PartitionEvent # type: ignore - from ._models import PartitionHealth # type: ignore - from ._models import PartitionHealthEvaluation # type: ignore - from ._models import PartitionHealthReportExpiredEvent # type: ignore - from ._models import PartitionHealthState # type: ignore - from ._models import PartitionHealthStateChunk # type: ignore - from ._models import PartitionHealthStateChunkList # type: ignore - from ._models import PartitionHealthStateFilter # type: ignore - from ._models import PartitionInformation # type: ignore - from ._models import PartitionInstanceCountScaleMechanism # type: ignore - from ._models import PartitionLoadInformation # type: ignore - from ._models import PartitionMetricLoadDescription # type: ignore - from ._models import PartitionNewHealthReportEvent # type: ignore - from ._models import 
PartitionPrimaryMoveAnalysisEvent # type: ignore - from ._models import PartitionQuorumLossProgress # type: ignore - from ._models import PartitionReconfiguredEvent # type: ignore - from ._models import PartitionRestartProgress # type: ignore - from ._models import PartitionSafetyCheck # type: ignore - from ._models import PartitionSchemeDescription # type: ignore - from ._models import PartitionsHealthEvaluation # type: ignore - from ._models import PrimaryReplicatorStatus # type: ignore - from ._models import Probe # type: ignore - from ._models import ProbeExec # type: ignore - from ._models import ProbeHttpGet # type: ignore - from ._models import ProbeHttpGetHeaders # type: ignore - from ._models import ProbeTcpSocket # type: ignore - from ._models import PropertyBatchDescriptionList # type: ignore - from ._models import PropertyBatchInfo # type: ignore - from ._models import PropertyBatchOperation # type: ignore - from ._models import PropertyDescription # type: ignore - from ._models import PropertyInfo # type: ignore - from ._models import PropertyMetadata # type: ignore - from ._models import PropertyValue # type: ignore - from ._models import ProvisionApplicationTypeDescription # type: ignore - from ._models import ProvisionApplicationTypeDescriptionBase # type: ignore - from ._models import ProvisionFabricDescription # type: ignore - from ._models import PutPropertyBatchOperation # type: ignore - from ._models import ReconfigurationInformation # type: ignore - from ._models import RegistryCredential # type: ignore - from ._models import ReliableCollectionsRef # type: ignore - from ._models import RemoteReplicatorAcknowledgementDetail # type: ignore - from ._models import RemoteReplicatorAcknowledgementStatus # type: ignore - from ._models import RemoteReplicatorStatus # type: ignore - from ._models import RepairImpactDescriptionBase # type: ignore - from ._models import RepairTargetDescriptionBase # type: ignore - from ._models import RepairTask # type: 
ignore - from ._models import RepairTaskApproveDescription # type: ignore - from ._models import RepairTaskCancelDescription # type: ignore - from ._models import RepairTaskDeleteDescription # type: ignore - from ._models import RepairTaskHistory # type: ignore - from ._models import RepairTaskUpdateHealthPolicyDescription # type: ignore - from ._models import RepairTaskUpdateInfo # type: ignore - from ._models import ReplicaEvent # type: ignore - from ._models import ReplicaHealth # type: ignore - from ._models import ReplicaHealthEvaluation # type: ignore - from ._models import ReplicaHealthState # type: ignore - from ._models import ReplicaHealthStateChunk # type: ignore - from ._models import ReplicaHealthStateChunkList # type: ignore - from ._models import ReplicaHealthStateFilter # type: ignore - from ._models import ReplicaInfo # type: ignore - from ._models import ReplicaLifecycleDescription # type: ignore - from ._models import ReplicaMetricLoadDescription # type: ignore - from ._models import ReplicaStatusBase # type: ignore - from ._models import ReplicasHealthEvaluation # type: ignore - from ._models import ReplicatorQueueStatus # type: ignore - from ._models import ReplicatorStatus # type: ignore - from ._models import ResolvedServiceEndpoint # type: ignore - from ._models import ResolvedServicePartition # type: ignore - from ._models import ResourceLimits # type: ignore - from ._models import ResourceRequests # type: ignore - from ._models import ResourceRequirements # type: ignore - from ._models import RestartDeployedCodePackageDescription # type: ignore - from ._models import RestartNodeDescription # type: ignore - from ._models import RestartPartitionResult # type: ignore - from ._models import RestorePartitionDescription # type: ignore - from ._models import RestoreProgressInfo # type: ignore - from ._models import ResumeApplicationUpgradeDescription # type: ignore - from ._models import ResumeClusterUpgradeDescription # type: ignore - from 
._models import RetentionPolicyDescription # type: ignore - from ._models import RollingUpgradeUpdateDescription # type: ignore - from ._models import RunToCompletionExecutionPolicy # type: ignore - from ._models import SafetyCheck # type: ignore - from ._models import SafetyCheckWrapper # type: ignore - from ._models import ScalingMechanismDescription # type: ignore - from ._models import ScalingPolicyDescription # type: ignore - from ._models import ScalingTriggerDescription # type: ignore - from ._models import SecondaryActiveReplicatorStatus # type: ignore - from ._models import SecondaryIdleReplicatorStatus # type: ignore - from ._models import SecondaryReplicatorStatus # type: ignore - from ._models import SecretResourceDescription # type: ignore - from ._models import SecretResourceProperties # type: ignore - from ._models import SecretResourcePropertiesBase # type: ignore - from ._models import SecretValue # type: ignore - from ._models import SecretValueProperties # type: ignore - from ._models import SecretValueResourceDescription # type: ignore - from ._models import SecretValueResourceProperties # type: ignore - from ._models import SeedNodeSafetyCheck # type: ignore - from ._models import SelectedPartition # type: ignore - from ._models import ServiceBackupConfigurationInfo # type: ignore - from ._models import ServiceBackupEntity # type: ignore - from ._models import ServiceCorrelationDescription # type: ignore - from ._models import ServiceCreatedEvent # type: ignore - from ._models import ServiceDeletedEvent # type: ignore - from ._models import ServiceDescription # type: ignore - from ._models import ServiceEvent # type: ignore - from ._models import ServiceFromTemplateDescription # type: ignore - from ._models import ServiceHealth # type: ignore - from ._models import ServiceHealthEvaluation # type: ignore - from ._models import ServiceHealthReportExpiredEvent # type: ignore - from ._models import ServiceHealthState # type: ignore - from ._models 
import ServiceHealthStateChunk # type: ignore - from ._models import ServiceHealthStateChunkList # type: ignore - from ._models import ServiceHealthStateFilter # type: ignore - from ._models import ServiceIdentity # type: ignore - from ._models import ServiceInfo # type: ignore - from ._models import ServiceLoadMetricDescription # type: ignore - from ._models import ServiceNameInfo # type: ignore - from ._models import ServiceNewHealthReportEvent # type: ignore - from ._models import ServicePartitionInfo # type: ignore - from ._models import ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription # type: ignore - from ._models import ServicePlacementInvalidDomainPolicyDescription # type: ignore - from ._models import ServicePlacementNonPartiallyPlaceServicePolicyDescription # type: ignore - from ._models import ServicePlacementPolicyDescription # type: ignore - from ._models import ServicePlacementPreferPrimaryDomainPolicyDescription # type: ignore - from ._models import ServicePlacementRequireDomainDistributionPolicyDescription # type: ignore - from ._models import ServicePlacementRequiredDomainPolicyDescription # type: ignore - from ._models import ServiceProperties # type: ignore - from ._models import ServiceReplicaDescription # type: ignore - from ._models import ServiceReplicaProperties # type: ignore - from ._models import ServiceResourceDescription # type: ignore - from ._models import ServiceResourceProperties # type: ignore - from ._models import ServiceTypeDescription # type: ignore - from ._models import ServiceTypeExtensionDescription # type: ignore - from ._models import ServiceTypeHealthPolicy # type: ignore - from ._models import ServiceTypeHealthPolicyMapItem # type: ignore - from ._models import ServiceTypeInfo # type: ignore - from ._models import ServiceTypeManifest # type: ignore - from ._models import ServiceUpdateDescription # type: ignore - from ._models import ServiceUpgradeProgress # type: ignore - from ._models import 
ServicesHealthEvaluation # type: ignore - from ._models import Setting # type: ignore - from ._models import SingletonPartitionInformation # type: ignore - from ._models import SingletonPartitionSchemeDescription # type: ignore - from ._models import StartClusterUpgradeDescription # type: ignore - from ._models import StartedChaosEvent # type: ignore - from ._models import StatefulReplicaHealthReportExpiredEvent # type: ignore - from ._models import StatefulReplicaNewHealthReportEvent # type: ignore - from ._models import StatefulServiceDescription # type: ignore - from ._models import StatefulServiceInfo # type: ignore - from ._models import StatefulServicePartitionInfo # type: ignore - from ._models import StatefulServiceReplicaHealth # type: ignore - from ._models import StatefulServiceReplicaHealthState # type: ignore - from ._models import StatefulServiceReplicaInfo # type: ignore - from ._models import StatefulServiceTypeDescription # type: ignore - from ._models import StatefulServiceUpdateDescription # type: ignore - from ._models import StatelessReplicaHealthReportExpiredEvent # type: ignore - from ._models import StatelessReplicaNewHealthReportEvent # type: ignore - from ._models import StatelessServiceDescription # type: ignore - from ._models import StatelessServiceInfo # type: ignore - from ._models import StatelessServiceInstanceHealth # type: ignore - from ._models import StatelessServiceInstanceHealthState # type: ignore - from ._models import StatelessServiceInstanceInfo # type: ignore - from ._models import StatelessServicePartitionInfo # type: ignore - from ._models import StatelessServiceTypeDescription # type: ignore - from ._models import StatelessServiceUpdateDescription # type: ignore - from ._models import StoppedChaosEvent # type: ignore - from ._models import StringPropertyValue # type: ignore - from ._models import SuccessfulPropertyBatchInfo # type: ignore - from ._models import SystemApplicationHealthEvaluation # type: ignore - from 
._models import TcpConfig # type: ignore - from ._models import TestErrorChaosEvent # type: ignore - from ._models import TimeBasedBackupScheduleDescription # type: ignore - from ._models import TimeOfDay # type: ignore - from ._models import TimeRange # type: ignore - from ._models import UniformInt64RangePartitionSchemeDescription # type: ignore - from ._models import UnplacedReplicaInformation # type: ignore - from ._models import UnprovisionApplicationTypeDescriptionInfo # type: ignore - from ._models import UnprovisionFabricDescription # type: ignore - from ._models import UpdateClusterUpgradeDescription # type: ignore - from ._models import UpdatePartitionLoadResult # type: ignore - from ._models import UpgradeDomainDeltaNodesCheckHealthEvaluation # type: ignore - from ._models import UpgradeDomainInfo # type: ignore - from ._models import UpgradeDomainNodesHealthEvaluation # type: ignore - from ._models import UpgradeOrchestrationServiceState # type: ignore - from ._models import UpgradeOrchestrationServiceStateSummary # type: ignore - from ._models import UploadChunkRange # type: ignore - from ._models import UploadSession # type: ignore - from ._models import UploadSessionInfo # type: ignore - from ._models import UsageInfo # type: ignore - from ._models import ValidationFailedChaosEvent # type: ignore - from ._models import VolumeProviderParametersAzureFile # type: ignore - from ._models import VolumeReference # type: ignore - from ._models import VolumeResourceDescription # type: ignore - from ._models import WaitForInbuildReplicaSafetyCheck # type: ignore - from ._models import WaitForPrimaryPlacementSafetyCheck # type: ignore - from ._models import WaitForPrimarySwapSafetyCheck # type: ignore - from ._models import WaitForReconfigurationSafetyCheck # type: ignore - from ._models import WaitingChaosEvent # type: ignore - -from ._service_fabric_client_apis_enums import ( + from ._models import AadMetadata + from ._models import AadMetadataObject + from 
._models import AddRemoveIncrementalNamedPartitionScalingMechanism + from ._models import AddRemoveReplicaScalingMechanism + from ._models import AnalysisEventMetadata + from ._models import ApplicationBackupConfigurationInfo + from ._models import ApplicationBackupEntity + from ._models import ApplicationCapacityDescription + from ._models import ApplicationContainerInstanceExitedEvent + from ._models import ApplicationCreatedEvent + from ._models import ApplicationDeletedEvent + from ._models import ApplicationDescription + from ._models import ApplicationEvent + from ._models import ApplicationHealth + from ._models import ApplicationHealthEvaluation + from ._models import ApplicationHealthPolicies + from ._models import ApplicationHealthPolicy + from ._models import ApplicationHealthPolicyMapItem + from ._models import ApplicationHealthPolicyMapObject + from ._models import ApplicationHealthReportExpiredEvent + from ._models import ApplicationHealthState + from ._models import ApplicationHealthStateChunk + from ._models import ApplicationHealthStateChunkList + from ._models import ApplicationHealthStateFilter + from ._models import ApplicationInfo + from ._models import ApplicationLoadInfo + from ._models import ApplicationLoadMetricInformation + from ._models import ApplicationMetricDescription + from ._models import ApplicationNameInfo + from ._models import ApplicationNewHealthReportEvent + from ._models import ApplicationParameter + from ._models import ApplicationProcessExitedEvent + from ._models import ApplicationResourceDescription + from ._models import ApplicationResourceUpgradeProgressInfo + from ._models import ApplicationScopedVolume + from ._models import ApplicationScopedVolumeCreationParameters + from ._models import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk + from ._models import ApplicationsHealthEvaluation + from ._models import ApplicationTypeApplicationsHealthEvaluation + from ._models import 
ApplicationTypeHealthPolicyMapItem + from ._models import ApplicationTypeImageStorePath + from ._models import ApplicationTypeInfo + from ._models import ApplicationTypeManifest + from ._models import ApplicationUpgradeCompletedEvent + from ._models import ApplicationUpgradeDescription + from ._models import ApplicationUpgradeDomainCompletedEvent + from ._models import ApplicationUpgradeProgressInfo + from ._models import ApplicationUpgradeRollbackCompletedEvent + from ._models import ApplicationUpgradeRollbackStartedEvent + from ._models import ApplicationUpgradeStartedEvent + from ._models import ApplicationUpgradeUpdateDescription + from ._models import AutoScalingMechanism + from ._models import AutoScalingMetric + from ._models import AutoScalingPolicy + from ._models import AutoScalingResourceMetric + from ._models import AutoScalingTrigger + from ._models import AverageLoadScalingTrigger + from ._models import AveragePartitionLoadScalingTrigger + from ._models import AverageServiceLoadScalingTrigger + from ._models import AzureBlobBackupStorageDescription + from ._models import AzureInternalMonitoringPipelineSinkDescription + from ._models import BackupConfigurationInfo + from ._models import BackupEntity + from ._models import BackupInfo + from ._models import BackupPartitionDescription + from ._models import BackupPolicyDescription + from ._models import BackupProgressInfo + from ._models import BackupScheduleDescription + from ._models import BackupStorageDescription + from ._models import BackupSuspensionInfo + from ._models import BasicRetentionPolicyDescription + from ._models import BinaryPropertyValue + from ._models import Chaos + from ._models import ChaosCodePackageRestartScheduledEvent + from ._models import ChaosContext + from ._models import ChaosEvent + from ._models import ChaosEventsSegment + from ._models import ChaosEventWrapper + from ._models import ChaosNodeRestartScheduledEvent + from ._models import ChaosParameters + from ._models 
import ChaosParametersDictionaryItem + from ._models import ChaosPartitionPrimaryMoveScheduledEvent + from ._models import ChaosPartitionSecondaryMoveScheduledEvent + from ._models import ChaosReplicaRemovalScheduledEvent + from ._models import ChaosReplicaRestartScheduledEvent + from ._models import ChaosSchedule + from ._models import ChaosScheduleDescription + from ._models import ChaosScheduleJob + from ._models import ChaosScheduleJobActiveDaysOfWeek + from ._models import ChaosStartedEvent + from ._models import ChaosStoppedEvent + from ._models import ChaosTargetFilter + from ._models import CheckExistsPropertyBatchOperation + from ._models import CheckSequencePropertyBatchOperation + from ._models import CheckValuePropertyBatchOperation + from ._models import ClusterConfiguration + from ._models import ClusterConfigurationUpgradeDescription + from ._models import ClusterConfigurationUpgradeStatusInfo + from ._models import ClusterEvent + from ._models import ClusterHealth + from ._models import ClusterHealthChunk + from ._models import ClusterHealthChunkQueryDescription + from ._models import ClusterHealthPolicies + from ._models import ClusterHealthPolicy + from ._models import ClusterHealthReportExpiredEvent + from ._models import ClusterLoadInfo + from ._models import ClusterManifest + from ._models import ClusterNewHealthReportEvent + from ._models import ClusterUpgradeCompletedEvent + from ._models import ClusterUpgradeDescriptionObject + from ._models import ClusterUpgradeDomainCompletedEvent + from ._models import ClusterUpgradeHealthPolicyObject + from ._models import ClusterUpgradeProgressObject + from ._models import ClusterUpgradeRollbackCompletedEvent + from ._models import ClusterUpgradeRollbackStartedEvent + from ._models import ClusterUpgradeStartedEvent + from ._models import ClusterVersion + from ._models import CodePackageEntryPoint + from ._models import CodePackageEntryPointStatistics + from ._models import ComposeDeploymentStatusInfo + 
from ._models import ComposeDeploymentUpgradeDescription + from ._models import ComposeDeploymentUpgradeProgressInfo + from ._models import ConfigParameterOverride + from ._models import ContainerApiRequestBody + from ._models import ContainerApiResponse + from ._models import ContainerApiResult + from ._models import ContainerCodePackageProperties + from ._models import ContainerEvent + from ._models import ContainerInstanceEvent + from ._models import ContainerInstanceView + from ._models import ContainerLabel + from ._models import ContainerLogs + from ._models import ContainerState + from ._models import CreateComposeDeploymentDescription + from ._models import CurrentUpgradeDomainProgressInfo + from ._models import DeactivationIntentDescription + from ._models import DefaultExecutionPolicy + from ._models import DeletePropertyBatchOperation + from ._models import DeltaNodesCheckHealthEvaluation + from ._models import DeployedApplicationHealth + from ._models import DeployedApplicationHealthEvaluation + from ._models import DeployedApplicationHealthReportExpiredEvent + from ._models import DeployedApplicationHealthState + from ._models import DeployedApplicationHealthStateChunk + from ._models import DeployedApplicationHealthStateChunkList + from ._models import DeployedApplicationHealthStateFilter + from ._models import DeployedApplicationInfo + from ._models import DeployedApplicationNewHealthReportEvent + from ._models import DeployedApplicationsHealthEvaluation + from ._models import DeployedCodePackageInfo + from ._models import DeployedServicePackageHealth + from ._models import DeployedServicePackageHealthEvaluation + from ._models import DeployedServicePackageHealthReportExpiredEvent + from ._models import DeployedServicePackageHealthState + from ._models import DeployedServicePackageHealthStateChunk + from ._models import DeployedServicePackageHealthStateChunkList + from ._models import DeployedServicePackageHealthStateFilter + from ._models import 
DeployedServicePackageInfo + from ._models import DeployedServicePackageNewHealthReportEvent + from ._models import DeployedServicePackagesHealthEvaluation + from ._models import DeployedServiceReplicaDetailInfo + from ._models import DeployedServiceReplicaInfo + from ._models import DeployedServiceTypeInfo + from ._models import DeployedStatefulServiceReplicaDetailInfo + from ._models import DeployedStatefulServiceReplicaInfo + from ._models import DeployedStatelessServiceInstanceDetailInfo + from ._models import DeployedStatelessServiceInstanceInfo + from ._models import DeployServicePackageToNodeDescription + from ._models import DiagnosticsDescription + from ._models import DiagnosticsRef + from ._models import DiagnosticsSinkProperties + from ._models import DisableBackupDescription + from ._models import DiskInfo + from ._models import DoublePropertyValue + from ._models import DsmsAzureBlobBackupStorageDescription + from ._models import EnableBackupDescription + from ._models import EndpointProperties + from ._models import EndpointRef + from ._models import EnsureAvailabilitySafetyCheck + from ._models import EnsurePartitionQuorumSafetyCheck + from ._models import EntityHealth + from ._models import EntityHealthState + from ._models import EntityHealthStateChunk + from ._models import EntityHealthStateChunkList + from ._models import EntityKindHealthStateCount + from ._models import EnvironmentVariable + from ._models import Epoch + from ._models import EventHealthEvaluation + from ._models import ExecutingFaultsChaosEvent + from ._models import ExecutionPolicy + from ._models import ExternalStoreProvisionApplicationTypeDescription + from ._models import FabricCodeVersionInfo + from ._models import FabricConfigVersionInfo + from ._models import FabricError, FabricErrorException + from ._models import FabricErrorError + from ._models import FabricEvent + from ._models import FailedPropertyBatchInfo + from ._models import FailedUpgradeDomainProgressObject + 
from ._models import FailureUpgradeDomainProgressInfo + from ._models import FileInfo + from ._models import FileShareBackupStorageDescription + from ._models import FileVersion + from ._models import FolderInfo + from ._models import FolderSizeInfo + from ._models import FrequencyBasedBackupScheduleDescription + from ._models import GatewayDestination + from ._models import GatewayResourceDescription + from ._models import GetBackupByStorageQueryDescription + from ._models import GetPropertyBatchOperation + from ._models import GuidPropertyValue + from ._models import HealthEvaluation + from ._models import HealthEvaluationWrapper + from ._models import HealthEvent + from ._models import HealthInformation + from ._models import HealthStateCount + from ._models import HealthStatistics + from ._models import HttpConfig + from ._models import HttpHostConfig + from ._models import HttpRouteConfig + from ._models import HttpRouteMatchHeader + from ._models import HttpRouteMatchPath + from ._models import HttpRouteMatchRule + from ._models import IdentityDescription + from ._models import IdentityItemDescription + from ._models import ImageRegistryCredential + from ._models import ImageStoreContent + from ._models import ImageStoreCopyDescription + from ._models import ImageStoreInfo + from ._models import InlinedValueSecretResourceProperties + from ._models import InstanceLifecycleDescription + from ._models import Int64PropertyValue + from ._models import Int64RangePartitionInformation + from ._models import InvokeDataLossResult + from ._models import InvokeQuorumLossResult + from ._models import KeyValueStoreReplicaStatus + from ._models import LoadedPartitionInformationQueryDescription + from ._models import LoadedPartitionInformationResult + from ._models import LoadedPartitionInformationResultList + from ._models import LoadMetricInformation + from ._models import LoadMetricReport + from ._models import LoadMetricReportInfo + from ._models import 
LocalNetworkResourceProperties + from ._models import ManagedApplicationIdentity + from ._models import ManagedApplicationIdentityDescription + from ._models import ManagedIdentityAzureBlobBackupStorageDescription + from ._models import MetricLoadDescription + from ._models import MonitoringPolicyDescription + from ._models import NameDescription + from ._models import NamedPartitionInformation + from ._models import NamedPartitionSchemeDescription + from ._models import NetworkRef + from ._models import NetworkResourceDescription + from ._models import NetworkResourceProperties + from ._models import NetworkResourcePropertiesBase + from ._models import NodeAbortedEvent + from ._models import NodeAddedToClusterEvent + from ._models import NodeClosedEvent + from ._models import NodeDeactivateCompletedEvent + from ._models import NodeDeactivateStartedEvent + from ._models import NodeDeactivationInfo + from ._models import NodeDeactivationTask + from ._models import NodeDeactivationTaskId + from ._models import NodeDownEvent + from ._models import NodeEvent + from ._models import NodeHealth + from ._models import NodeHealthEvaluation + from ._models import NodeHealthReportExpiredEvent + from ._models import NodeHealthState + from ._models import NodeHealthStateChunk + from ._models import NodeHealthStateChunkList + from ._models import NodeHealthStateFilter + from ._models import NodeId + from ._models import NodeImpact + from ._models import NodeInfo + from ._models import NodeLoadInfo + from ._models import NodeLoadMetricInformation + from ._models import NodeNewHealthReportEvent + from ._models import NodeOpenFailedEvent + from ._models import NodeOpenSucceededEvent + from ._models import NodeRemovedFromClusterEvent + from ._models import NodeRepairImpactDescription + from ._models import NodeRepairTargetDescription + from ._models import NodeResult + from ._models import NodesHealthEvaluation + from ._models import NodeTagsDescription + from ._models import 
NodeTransitionProgress + from ._models import NodeTransitionResult + from ._models import NodeTypeHealthPolicyMapItem + from ._models import NodeTypeNodesHealthEvaluation + from ._models import NodeUpEvent + from ._models import NodeUpgradeProgressInfo + from ._models import OperationStatus + from ._models import PackageSharingPolicyInfo + from ._models import PagedApplicationInfoList + from ._models import PagedApplicationResourceDescriptionList + from ._models import PagedApplicationTypeInfoList + from ._models import PagedBackupConfigurationInfoList + from ._models import PagedBackupEntityList + from ._models import PagedBackupInfoList + from ._models import PagedBackupPolicyDescriptionList + from ._models import PagedComposeDeploymentStatusInfoList + from ._models import PagedDeployedApplicationInfoList + from ._models import PagedGatewayResourceDescriptionList + from ._models import PagedNetworkResourceDescriptionList + from ._models import PagedNodeInfoList + from ._models import PagedPropertyInfoList + from ._models import PagedReplicaInfoList + from ._models import PagedSecretResourceDescriptionList + from ._models import PagedSecretValueResourceDescriptionList + from ._models import PagedServiceInfoList + from ._models import PagedServicePartitionInfoList + from ._models import PagedServiceReplicaDescriptionList + from ._models import PagedServiceResourceDescriptionList + from ._models import PagedSubNameInfoList + from ._models import PagedUpdatePartitionLoadResultList + from ._models import PagedVolumeResourceDescriptionList + from ._models import PartitionAnalysisEvent + from ._models import PartitionBackupConfigurationInfo + from ._models import PartitionBackupEntity + from ._models import PartitionDataLossProgress + from ._models import PartitionEvent + from ._models import PartitionHealth + from ._models import PartitionHealthEvaluation + from ._models import PartitionHealthReportExpiredEvent + from ._models import PartitionHealthState + from 
._models import PartitionHealthStateChunk + from ._models import PartitionHealthStateChunkList + from ._models import PartitionHealthStateFilter + from ._models import PartitionInformation + from ._models import PartitionInstanceCountScaleMechanism + from ._models import PartitionLoadInformation + from ._models import PartitionMetricLoadDescription + from ._models import PartitionNewHealthReportEvent + from ._models import PartitionPrimaryMoveAnalysisEvent + from ._models import PartitionQuorumLossProgress + from ._models import PartitionReconfiguredEvent + from ._models import PartitionRestartProgress + from ._models import PartitionSafetyCheck + from ._models import PartitionSchemeDescription + from ._models import PartitionsHealthEvaluation + from ._models import PrimaryReplicatorStatus + from ._models import Probe + from ._models import ProbeExec + from ._models import ProbeHttpGet + from ._models import ProbeHttpGetHeaders + from ._models import ProbeTcpSocket + from ._models import PropertyBatchDescriptionList + from ._models import PropertyBatchInfo + from ._models import PropertyBatchOperation + from ._models import PropertyDescription + from ._models import PropertyInfo + from ._models import PropertyMetadata + from ._models import PropertyValue + from ._models import ProvisionApplicationTypeDescription + from ._models import ProvisionApplicationTypeDescriptionBase + from ._models import ProvisionFabricDescription + from ._models import PutPropertyBatchOperation + from ._models import ReconfigurationInformation + from ._models import RegistryCredential + from ._models import ReliableCollectionsRef + from ._models import RemoteReplicatorAcknowledgementDetail + from ._models import RemoteReplicatorAcknowledgementStatus + from ._models import RemoteReplicatorStatus + from ._models import RepairImpactDescriptionBase + from ._models import RepairTargetDescriptionBase + from ._models import RepairTask + from ._models import RepairTaskApproveDescription + from 
._models import RepairTaskCancelDescription + from ._models import RepairTaskDeleteDescription + from ._models import RepairTaskHistory + from ._models import RepairTaskUpdateHealthPolicyDescription + from ._models import RepairTaskUpdateInfo + from ._models import ReplicaEvent + from ._models import ReplicaHealth + from ._models import ReplicaHealthEvaluation + from ._models import ReplicaHealthState + from ._models import ReplicaHealthStateChunk + from ._models import ReplicaHealthStateChunkList + from ._models import ReplicaHealthStateFilter + from ._models import ReplicaInfo + from ._models import ReplicaLifecycleDescription + from ._models import ReplicaMetricLoadDescription + from ._models import ReplicasHealthEvaluation + from ._models import ReplicaStatusBase + from ._models import ReplicatorQueueStatus + from ._models import ReplicatorStatus + from ._models import ResolvedServiceEndpoint + from ._models import ResolvedServicePartition + from ._models import ResourceLimits + from ._models import ResourceRequests + from ._models import ResourceRequirements + from ._models import RestartDeployedCodePackageDescription + from ._models import RestartNodeDescription + from ._models import RestartPartitionResult + from ._models import RestorePartitionDescription + from ._models import RestoreProgressInfo + from ._models import ResumeApplicationUpgradeDescription + from ._models import ResumeClusterUpgradeDescription + from ._models import RetentionPolicyDescription + from ._models import RollingUpgradeUpdateDescription + from ._models import RunToCompletionExecutionPolicy + from ._models import SafetyCheck + from ._models import SafetyCheckWrapper + from ._models import ScalingMechanismDescription + from ._models import ScalingPolicyDescription + from ._models import ScalingTriggerDescription + from ._models import SecondaryActiveReplicatorStatus + from ._models import SecondaryIdleReplicatorStatus + from ._models import SecondaryReplicatorStatus + from ._models 
import SecretResourceDescription + from ._models import SecretResourceProperties + from ._models import SecretResourcePropertiesBase + from ._models import SecretValue + from ._models import SecretValueProperties + from ._models import SecretValueResourceDescription + from ._models import SeedNodeSafetyCheck + from ._models import SelectedPartition + from ._models import ServiceBackupConfigurationInfo + from ._models import ServiceBackupEntity + from ._models import ServiceCorrelationDescription + from ._models import ServiceCreatedEvent + from ._models import ServiceDeletedEvent + from ._models import ServiceDescription + from ._models import ServiceEvent + from ._models import ServiceFromTemplateDescription + from ._models import ServiceHealth + from ._models import ServiceHealthEvaluation + from ._models import ServiceHealthReportExpiredEvent + from ._models import ServiceHealthState + from ._models import ServiceHealthStateChunk + from ._models import ServiceHealthStateChunkList + from ._models import ServiceHealthStateFilter + from ._models import ServiceIdentity + from ._models import ServiceInfo + from ._models import ServiceLoadMetricDescription + from ._models import ServiceNameInfo + from ._models import ServiceNewHealthReportEvent + from ._models import ServicePartitionInfo + from ._models import ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription + from ._models import ServicePlacementInvalidDomainPolicyDescription + from ._models import ServicePlacementNonPartiallyPlaceServicePolicyDescription + from ._models import ServicePlacementPolicyDescription + from ._models import ServicePlacementPreferPrimaryDomainPolicyDescription + from ._models import ServicePlacementRequiredDomainPolicyDescription + from ._models import ServicePlacementRequireDomainDistributionPolicyDescription + from ._models import ServiceProperties + from ._models import ServiceReplicaDescription + from ._models import ServiceReplicaProperties + from ._models import 
ServiceResourceDescription + from ._models import ServicesHealthEvaluation + from ._models import ServiceTypeDescription + from ._models import ServiceTypeExtensionDescription + from ._models import ServiceTypeHealthPolicy + from ._models import ServiceTypeHealthPolicyMapItem + from ._models import ServiceTypeInfo + from ._models import ServiceTypeManifest + from ._models import ServiceUpdateDescription + from ._models import ServiceUpgradeProgress + from ._models import Setting + from ._models import SingletonPartitionInformation + from ._models import SingletonPartitionSchemeDescription + from ._models import StartClusterUpgradeDescription + from ._models import StartedChaosEvent + from ._models import StatefulReplicaHealthReportExpiredEvent + from ._models import StatefulReplicaNewHealthReportEvent + from ._models import StatefulServiceDescription + from ._models import StatefulServiceInfo + from ._models import StatefulServicePartitionInfo + from ._models import StatefulServiceReplicaHealth + from ._models import StatefulServiceReplicaHealthState + from ._models import StatefulServiceReplicaInfo + from ._models import StatefulServiceTypeDescription + from ._models import StatefulServiceUpdateDescription + from ._models import StatelessReplicaHealthReportExpiredEvent + from ._models import StatelessReplicaNewHealthReportEvent + from ._models import StatelessServiceDescription + from ._models import StatelessServiceInfo + from ._models import StatelessServiceInstanceHealth + from ._models import StatelessServiceInstanceHealthState + from ._models import StatelessServiceInstanceInfo + from ._models import StatelessServicePartitionInfo + from ._models import StatelessServiceTypeDescription + from ._models import StatelessServiceUpdateDescription + from ._models import StoppedChaosEvent + from ._models import StringPropertyValue + from ._models import SuccessfulPropertyBatchInfo + from ._models import SystemApplicationHealthEvaluation + from ._models import 
TcpConfig + from ._models import TestErrorChaosEvent + from ._models import TimeBasedBackupScheduleDescription + from ._models import TimeOfDay + from ._models import TimeRange + from ._models import UniformInt64RangePartitionSchemeDescription + from ._models import UnplacedReplicaInformation + from ._models import UnprovisionApplicationTypeDescriptionInfo + from ._models import UnprovisionFabricDescription + from ._models import UpdateClusterUpgradeDescription + from ._models import UpdatePartitionLoadResult + from ._models import UpgradeDomainDeltaNodesCheckHealthEvaluation + from ._models import UpgradeDomainInfo + from ._models import UpgradeDomainNodesHealthEvaluation + from ._models import UpgradeOrchestrationServiceState + from ._models import UpgradeOrchestrationServiceStateSummary + from ._models import UploadChunkRange + from ._models import UploadSession + from ._models import UploadSessionInfo + from ._models import UsageInfo + from ._models import ValidationFailedChaosEvent + from ._models import VolumeProviderParametersAzureFile + from ._models import VolumeReference + from ._models import VolumeResourceDescription + from ._models import WaitForInbuildReplicaSafetyCheck + from ._models import WaitForPrimaryPlacementSafetyCheck + from ._models import WaitForPrimarySwapSafetyCheck + from ._models import WaitForReconfigurationSafetyCheck + from ._models import WaitingChaosEvent +from ._service_fabric_client_ap_is_enums import ( ApplicationDefinitionKind, ApplicationPackageCleanupPolicy, ApplicationResourceUpgradeState, @@ -1090,7 +1088,6 @@ HealthEvaluationKind, HealthState, HostIsolationMode, - HostOptions, HostType, ImageRegistryPasswordType, ImpactLevel, @@ -1111,7 +1108,6 @@ PackageSharingPolicyScope, PartitionAccessStatus, PartitionScheme, - PathMatchType, PropertyBatchInfoKind, PropertyBatchOperationKind, PropertyValueKind, @@ -1200,6 +1196,7 @@ 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParameters', 
'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', + 'ApplicationsHealthEvaluation', 'ApplicationTypeApplicationsHealthEvaluation', 'ApplicationTypeHealthPolicyMapItem', 'ApplicationTypeImageStorePath', @@ -1213,7 +1210,6 @@ 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStartedEvent', 'ApplicationUpgradeUpdateDescription', - 'ApplicationsHealthEvaluation', 'AutoScalingMechanism', 'AutoScalingMetric', 'AutoScalingPolicy', @@ -1239,8 +1235,8 @@ 'ChaosCodePackageRestartScheduledEvent', 'ChaosContext', 'ChaosEvent', - 'ChaosEventWrapper', 'ChaosEventsSegment', + 'ChaosEventWrapper', 'ChaosNodeRestartScheduledEvent', 'ChaosParameters', 'ChaosParametersDictionaryItem', @@ -1302,7 +1298,6 @@ 'DefaultExecutionPolicy', 'DeletePropertyBatchOperation', 'DeltaNodesCheckHealthEvaluation', - 'DeployServicePackageToNodeDescription', 'DeployedApplicationHealth', 'DeployedApplicationHealthEvaluation', 'DeployedApplicationHealthReportExpiredEvent', @@ -1331,6 +1326,7 @@ 'DeployedStatefulServiceReplicaInfo', 'DeployedStatelessServiceInstanceDetailInfo', 'DeployedStatelessServiceInstanceInfo', + 'DeployServicePackageToNodeDescription', 'DiagnosticsDescription', 'DiagnosticsRef', 'DiagnosticsSinkProperties', @@ -1356,7 +1352,7 @@ 'ExternalStoreProvisionApplicationTypeDescription', 'FabricCodeVersionInfo', 'FabricConfigVersionInfo', - 'FabricError', + 'FabricError', 'FabricErrorException', 'FabricErrorError', 'FabricEvent', 'FailedPropertyBatchInfo', @@ -1398,12 +1394,12 @@ 'InvokeDataLossResult', 'InvokeQuorumLossResult', 'KeyValueStoreReplicaStatus', - 'LoadMetricInformation', - 'LoadMetricReport', - 'LoadMetricReportInfo', 'LoadedPartitionInformationQueryDescription', 'LoadedPartitionInformationResult', 'LoadedPartitionInformationResultList', + 'LoadMetricInformation', + 'LoadMetricReport', + 'LoadMetricReportInfo', 'LocalNetworkResourceProperties', 'ManagedApplicationIdentity', 'ManagedApplicationIdentityDescription', @@ -1446,6 +1442,7 @@ 
'NodeRepairImpactDescription', 'NodeRepairTargetDescription', 'NodeResult', + 'NodesHealthEvaluation', 'NodeTagsDescription', 'NodeTransitionProgress', 'NodeTransitionResult', @@ -1453,7 +1450,6 @@ 'NodeTypeNodesHealthEvaluation', 'NodeUpEvent', 'NodeUpgradeProgressInfo', - 'NodesHealthEvaluation', 'OperationStatus', 'PackageSharingPolicyInfo', 'PagedApplicationInfoList', @@ -1545,8 +1541,8 @@ 'ReplicaInfo', 'ReplicaLifecycleDescription', 'ReplicaMetricLoadDescription', - 'ReplicaStatusBase', 'ReplicasHealthEvaluation', + 'ReplicaStatusBase', 'ReplicatorQueueStatus', 'ReplicatorStatus', 'ResolvedServiceEndpoint', @@ -1578,7 +1574,6 @@ 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', - 'SecretValueResourceProperties', 'SeedNodeSafetyCheck', 'SelectedPartition', 'ServiceBackupConfigurationInfo', @@ -1607,13 +1602,13 @@ 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'ServicePlacementPolicyDescription', 'ServicePlacementPreferPrimaryDomainPolicyDescription', - 'ServicePlacementRequireDomainDistributionPolicyDescription', 'ServicePlacementRequiredDomainPolicyDescription', + 'ServicePlacementRequireDomainDistributionPolicyDescription', 'ServiceProperties', 'ServiceReplicaDescription', 'ServiceReplicaProperties', 'ServiceResourceDescription', - 'ServiceResourceProperties', + 'ServicesHealthEvaluation', 'ServiceTypeDescription', 'ServiceTypeExtensionDescription', 'ServiceTypeHealthPolicy', @@ -1622,7 +1617,6 @@ 'ServiceTypeManifest', 'ServiceUpdateDescription', 'ServiceUpgradeProgress', - 'ServicesHealthEvaluation', 'Setting', 'SingletonPartitionInformation', 'SingletonPartitionSchemeDescription', @@ -1681,118 +1675,116 @@ 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfigurationSafetyCheck', 'WaitingChaosEvent', + 'HealthState', + 'FabricErrorCodes', 'ApplicationDefinitionKind', - 'ApplicationPackageCleanupPolicy', - 'ApplicationResourceUpgradeState', - 'ApplicationScopedVolumeKind', 'ApplicationStatus', + 
'ApplicationPackageCleanupPolicy', 'ApplicationTypeDefinitionKind', 'ApplicationTypeStatus', - 'AutoScalingMechanismKind', - 'AutoScalingMetricKind', - 'AutoScalingResourceMetricName', - 'AutoScalingTriggerKind', - 'BackupEntityKind', - 'BackupPolicyScope', - 'BackupScheduleFrequencyType', - 'BackupScheduleKind', - 'BackupState', - 'BackupStorageKind', - 'BackupSuspensionScope', - 'BackupType', - 'ChaosEventKind', - 'ChaosScheduleStatus', - 'ChaosStatus', - 'ComposeDeploymentStatus', - 'ComposeDeploymentUpgradeState', - 'CreateFabricDump', - 'DataLossMode', - 'DayOfWeek', + 'UpgradeKind', + 'UpgradeMode', + 'UpgradeSortOrder', + 'FailureAction', + 'UpgradeDomainState', + 'UpgradeState', + 'NodeUpgradePhase', + 'FailureReason', 'DeactivationIntent', 'DeployedApplicationStatus', - 'DeploymentStatus', - 'DiagnosticsSinkKind', + 'ReplicaStatus', + 'ReplicaRole', + 'ReconfigurationPhase', + 'ReconfigurationType', 'EntityKind', - 'EntryPointStatus', - 'EnvironmentVariableType', - 'ExecutionPolicyType', - 'FabricErrorCodes', 'FabricEventKind', - 'FabricReplicaStatus', - 'FailureAction', - 'FailureReason', - 'HeaderMatchType', 'HealthEvaluationKind', - 'HealthState', - 'HostIsolationMode', - 'HostOptions', - 'HostType', - 'ImageRegistryPasswordType', - 'ImpactLevel', - 'ManagedIdentityType', - 'MoveCost', - 'NetworkKind', + 'Ordering', 'NodeDeactivationIntent', 'NodeDeactivationStatus', 'NodeDeactivationTaskType', 'NodeStatus', - 'NodeStatusFilter', - 'NodeTransitionType', - 'NodeUpgradePhase', - 'OperatingSystemType', + 'ServicePartitionStatus', + 'ServiceStatus', + 'ProvisionApplicationTypeKind', + 'UpgradeType', + 'SafetyCheckKind', + 'CreateFabricDump', + 'ServicePackageActivationMode', + 'ServiceKind', + 'ServicePartitionKind', + 'ServicePlacementPolicyType', + 'ServiceLoadMetricWeight', + 'HostType', + 'HostIsolationMode', + 'DeploymentStatus', + 'EntryPointStatus', + 'ChaosStatus', + 'ChaosScheduleStatus', + 'ChaosEventKind', + 'ComposeDeploymentStatus', + 
'ComposeDeploymentUpgradeState', + 'ServiceCorrelationScheme', + 'MoveCost', + 'PartitionScheme', + 'ServiceOperationName', + 'ReplicatorOperationName', + 'PartitionAccessStatus', + 'FabricReplicaStatus', + 'ReplicaKind', + 'ServiceTypeRegistrationStatus', + 'ServiceEndpointRole', 'OperationState', 'OperationType', - 'Ordering', 'PackageSharingPolicyScope', - 'PartitionAccessStatus', - 'PartitionScheme', - 'PathMatchType', - 'PropertyBatchInfoKind', - 'PropertyBatchOperationKind', 'PropertyValueKind', - 'ProvisionApplicationTypeKind', - 'QuorumLossMode', - 'ReconfigurationPhase', - 'ReconfigurationType', + 'PropertyBatchOperationKind', + 'PropertyBatchInfoKind', + 'RetentionPolicyType', + 'BackupStorageKind', + 'BackupScheduleKind', + 'BackupPolicyScope', + 'BackupSuspensionScope', + 'RestoreState', + 'BackupType', + 'ManagedIdentityType', + 'BackupScheduleFrequencyType', + 'DayOfWeek', + 'BackupState', + 'BackupEntityKind', + 'ImpactLevel', 'RepairImpactKind', 'RepairTargetKind', - 'RepairTaskHealthCheckState', - 'ReplicaHealthReportServiceKind', - 'ReplicaKind', - 'ReplicaRole', - 'ReplicaStatus', - 'ReplicatorOperationName', - 'ResourceStatus', - 'RestartPartitionMode', - 'RestartPolicy', - 'RestoreState', + 'State', 'ResultStatus', - 'RetentionPolicyType', - 'RollingUpgradeMode', - 'SafetyCheckKind', - 'ScalingMechanismKind', + 'RepairTaskHealthCheckState', 'ScalingTriggerKind', - 'Scheme', + 'ScalingMechanismKind', + 'ResourceStatus', 'SecretKind', - 'ServiceCorrelationScheme', - 'ServiceEndpointRole', - 'ServiceKind', - 'ServiceLoadMetricWeight', - 'ServiceOperationName', - 'ServicePackageActivationMode', - 'ServicePartitionKind', - 'ServicePartitionStatus', - 'ServicePlacementPolicyType', - 'ServiceStatus', - 'ServiceTypeRegistrationStatus', - 'SettingType', - 'SizeTypes', - 'State', - 'UpgradeDomainState', - 'UpgradeKind', - 'UpgradeMode', - 'UpgradeSortOrder', - 'UpgradeState', - 'UpgradeType', 'VolumeProvider', + 'SizeTypes', + 
'ApplicationScopedVolumeKind', + 'NetworkKind', + 'HeaderMatchType', + 'OperatingSystemType', + 'ImageRegistryPasswordType', + 'EnvironmentVariableType', + 'SettingType', + 'Scheme', + 'ApplicationResourceUpgradeState', + 'RollingUpgradeMode', + 'DiagnosticsSinkKind', + 'AutoScalingMechanismKind', + 'AutoScalingMetricKind', + 'AutoScalingResourceMetricName', + 'AutoScalingTriggerKind', + 'ExecutionPolicyType', + 'RestartPolicy', + 'NodeStatusFilter', + 'ReplicaHealthReportServiceKind', + 'DataLossMode', + 'NodeTransitionType', + 'QuorumLossMode', + 'RestartPartitionMode', ] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py index a5b67aec98b9..0fe5e3b4a876 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models.py @@ -1,16 +1,19 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from azure.core.exceptions import HttpResponseError -import msrest.serialization +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError -class AadMetadata(msrest.serialization.Model): +class AadMetadata(Model): """Azure Active Directory metadata used for secured connection to cluster. :param authority: The AAD authority url. 
@@ -36,10 +39,7 @@ class AadMetadata(msrest.serialization.Model): 'tenant': {'key': 'tenant', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AadMetadata, self).__init__(**kwargs) self.authority = kwargs.get('authority', None) self.client = kwargs.get('client', None) @@ -49,12 +49,14 @@ def __init__( self.tenant = kwargs.get('tenant', None) -class AadMetadataObject(msrest.serialization.Model): - """Azure Active Directory metadata object used for secured connection to cluster. +class AadMetadataObject(Model): + """Azure Active Directory metadata object used for secured connection to + cluster. :param type: The client authentication method. :type type: str - :param metadata: Azure Active Directory metadata used for secured connection to cluster. + :param metadata: Azure Active Directory metadata used for secured + connection to cluster. :type metadata: ~azure.servicefabric.models.AadMetadata """ @@ -63,27 +65,23 @@ class AadMetadataObject(msrest.serialization.Model): 'metadata': {'key': 'metadata', 'type': 'AadMetadata'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AadMetadataObject, self).__init__(**kwargs) self.type = kwargs.get('type', None) self.metadata = kwargs.get('metadata', None) -class ScalingMechanismDescription(msrest.serialization.Model): +class ScalingMechanismDescription(Model): """Describes the mechanism for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AddRemoveIncrementalNamedPartitionScalingMechanism, PartitionInstanceCountScaleMechanism. + sub-classes are: PartitionInstanceCountScaleMechanism, + AddRemoveIncrementalNamedPartitionScalingMechanism All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. 
- Possible values include: "Invalid", "PartitionInstanceCount", - "AddRemoveIncrementalNamedPartition". - :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -95,32 +93,30 @@ class ScalingMechanismDescription(msrest.serialization.Model): } _subtype_map = { - 'kind': {'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism', 'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism'} + 'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ScalingMechanismDescription, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing named partitions of a stateless service. Partition names are in the format '0','1''N-1'. + """Represents a scaling mechanism for adding or removing named partitions of a + stateless service. Partition names are in the format '0','1''N-1'. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. - Possible values include: "Invalid", "PartitionInstanceCount", - "AddRemoveIncrementalNamedPartition". - :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind - :param min_partition_count: Required. Minimum number of named partitions of the service. + :param kind: Required. Constant filled by server. + :type kind: str + :param min_partition_count: Required. Minimum number of named partitions + of the service. :type min_partition_count: int - :param max_partition_count: Required. Maximum number of named partitions of the service. 
+ :param max_partition_count: Required. Maximum number of named partitions + of the service. :type max_partition_count: int - :param scale_increment: Required. The number of instances to add or remove during a scaling - operation. + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. :type scale_increment: int """ @@ -138,28 +134,25 @@ class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescrip 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AddRemoveIncrementalNamedPartitionScalingMechanism, self).__init__(**kwargs) - self.kind = 'AddRemoveIncrementalNamedPartition' # type: str - self.min_partition_count = kwargs['min_partition_count'] - self.max_partition_count = kwargs['max_partition_count'] - self.scale_increment = kwargs['scale_increment'] + self.min_partition_count = kwargs.get('min_partition_count', None) + self.max_partition_count = kwargs.get('max_partition_count', None) + self.scale_increment = kwargs.get('scale_increment', None) + self.kind = 'AddRemoveIncrementalNamedPartition' -class AutoScalingMechanism(msrest.serialization.Model): - """Describes the mechanism for performing auto scaling operation. Derived classes will describe the actual mechanism. +class AutoScalingMechanism(Model): + """Describes the mechanism for performing auto scaling operation. Derived + classes will describe the actual mechanism. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AddRemoveReplicaScalingMechanism. + sub-classes are: AddRemoveReplicaScalingMechanism All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible - values include: "AddRemoveReplica". - :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind + :param kind: Required. 
Constant filled by server. + :type kind: str """ _validation = { @@ -174,30 +167,27 @@ class AutoScalingMechanism(msrest.serialization.Model): 'kind': {'AddRemoveReplica': 'AddRemoveReplicaScalingMechanism'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AutoScalingMechanism, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): - """Describes the horizontal auto scaling mechanism that adds or removes replicas (containers or container groups). + """Describes the horizontal auto scaling mechanism that adds or removes + replicas (containers or container groups). All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible - values include: "AddRemoveReplica". - :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind - :param min_count: Required. Minimum number of containers (scale down won't be performed below - this number). + :param kind: Required. Constant filled by server. + :type kind: str + :param min_count: Required. Minimum number of containers (scale down won't + be performed below this number). :type min_count: int - :param max_count: Required. Maximum number of containers (scale up won't be performed above - this number). + :param max_count: Required. Maximum number of containers (scale up won't + be performed above this number). :type max_count: int - :param scale_increment: Required. Each time auto scaling is performed, this number of - containers will be added or removed. + :param scale_increment: Required. Each time auto scaling is performed, + this number of containers will be added or removed. 
:type scale_increment: int """ @@ -215,24 +205,21 @@ class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): 'scale_increment': {'key': 'scaleIncrement', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AddRemoveReplicaScalingMechanism, self).__init__(**kwargs) - self.kind = 'AddRemoveReplica' # type: str - self.min_count = kwargs['min_count'] - self.max_count = kwargs['max_count'] - self.scale_increment = kwargs['scale_increment'] + self.min_count = kwargs.get('min_count', None) + self.max_count = kwargs.get('max_count', None) + self.scale_increment = kwargs.get('scale_increment', None) + self.kind = 'AddRemoveReplica' -class AnalysisEventMetadata(msrest.serialization.Model): +class AnalysisEventMetadata(Model): """Metadata about an Analysis Event. :param delay: The analysis delay. - :type delay: ~datetime.timedelta + :type delay: timedelta :param duration: The duration of analysis. - :type duration: ~datetime.timedelta + :type duration: timedelta """ _attribute_map = { @@ -240,35 +227,33 @@ class AnalysisEventMetadata(msrest.serialization.Model): 'duration': {'key': 'Duration', 'type': 'duration'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AnalysisEventMetadata, self).__init__(**kwargs) self.delay = kwargs.get('delay', None) self.duration = kwargs.get('duration', None) -class BackupConfigurationInfo(msrest.serialization.Model): +class BackupConfigurationInfo(Model): """Describes the backup configuration information. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupConfigurationInfo, PartitionBackupConfigurationInfo, ServiceBackupConfigurationInfo. + sub-classes are: ApplicationBackupConfigurationInfo, + ServiceBackupConfigurationInfo, PartitionBackupConfigurationInfo All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". - :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -276,45 +261,45 @@ class BackupConfigurationInfo(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo'} + 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupConfigurationInfo, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.policy_name = kwargs.get('policy_name', None) self.policy_inherited_from = kwargs.get('policy_inherited_from', None) self.suspension_info = kwargs.get('suspension_info', None) + self.kind = None class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric application specifying what backup policy is being applied and suspend description, if any. + """Backup configuration information for a specific Service Fabric application + specifying what backup policy is being applied and suspend description, if + any. All required parameters must be populated in order to send to Azure. - :param kind: Required. The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". 
- :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
:type application_name: str """ @@ -323,34 +308,30 @@ class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationBackupConfigurationInfo, self).__init__(**kwargs) - self.kind = 'Application' # type: str self.application_name = kwargs.get('application_name', None) + self.kind = 'Application' -class BackupEntity(msrest.serialization.Model): +class BackupEntity(Model): """Describes the Service Fabric entity that is configured for backup. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupEntity, PartitionBackupEntity, ServiceBackupEntity. + sub-classes are: ApplicationBackupEntity, ServiceBackupEntity, + PartitionBackupEntity All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param entity_kind: Required. Constant filled by server. 
+ :type entity_kind: str """ _validation = { @@ -362,15 +343,12 @@ class BackupEntity(msrest.serialization.Model): } _subtype_map = { - 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Partition': 'PartitionBackupEntity', 'Service': 'ServiceBackupEntity'} + 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupEntity, self).__init__(**kwargs) - self.entity_kind = None # type: Optional[str] + self.entity_kind = None class ApplicationBackupEntity(BackupEntity): @@ -378,11 +356,10 @@ class ApplicationBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str """ @@ -395,35 +372,38 @@ class ApplicationBackupEntity(BackupEntity): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationBackupEntity, self).__init__(**kwargs) - self.entity_kind = 'Application' # type: str self.application_name = kwargs.get('application_name', None) - - -class ApplicationCapacityDescription(msrest.serialization.Model): - """Describes capacity information for services of this application. This description can be used for describing the following. 
- - -* Reserving the capacity for the services on the nodes -* Limiting the total number of nodes that services of this application can run on -* Limiting the custom capacity metrics to limit the total consumption of this metric by the services of this application. - - :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity - for this application. Note that this does not mean that the services of this application will - be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. - The value of this property cannot be more than the value of the MaximumNodes property. + self.entity_kind = 'Application' + + +class ApplicationCapacityDescription(Model): + """Describes capacity information for services of this application. This + description can be used for describing the following. + - Reserving the capacity for the services on the nodes + - Limiting the total number of nodes that services of this application can + run on + - Limiting the custom capacity metrics to limit the total consumption of + this metric by the services of this application. + + :param minimum_nodes: The minimum number of nodes where Service Fabric + will reserve capacity for this application. Note that this does not mean + that the services of this application will be placed on all of those + nodes. If this property is set to zero, no capacity will be reserved. The + value of this property cannot be more than the value of the MaximumNodes + property. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity - for this application. Note that this does not mean that the services of this application will - be placed on all of those nodes. By default, the value of this property is zero and it means - that the services can be placed on any node. + :param maximum_nodes: The maximum number of nodes where Service Fabric + will reserve capacity for this application. 
Note that this does not mean + that the services of this application will be placed on all of those + nodes. By default, the value of this property is zero and it means that + the services can be placed on any node. Default value: 0 . :type maximum_nodes: long - :param application_metrics: List of application capacity metric description. - :type application_metrics: list[~azure.servicefabric.models.ApplicationMetricDescription] + :param application_metrics: List of application capacity metric + description. + :type application_metrics: + list[~azure.servicefabric.models.ApplicationMetricDescription] """ _validation = { @@ -437,164 +417,127 @@ class ApplicationCapacityDescription(msrest.serialization.Model): 'application_metrics': {'key': 'ApplicationMetrics', 'type': '[ApplicationMetricDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationCapacityDescription, self).__init__(**kwargs) self.minimum_nodes = kwargs.get('minimum_nodes', None) self.maximum_nodes = kwargs.get('maximum_nodes', 0) self.application_metrics = kwargs.get('application_metrics', None) -class FabricEvent(msrest.serialization.Model): +class FabricEvent(Model): """Represents the base for all Fabric Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, + NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ApplicationEvent': 'ApplicationEvent', 'ClusterEvent': 'ClusterEvent', 'ContainerInstanceEvent': 'ContainerInstanceEvent', 'NodeEvent': 'NodeEvent', 'PartitionEvent': 'PartitionEvent', 'ReplicaEvent': 'ReplicaEvent', 'ServiceEvent': 'ServiceEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FabricEvent, self).__init__(**kwargs) - self.kind = None # type: Optional[str] - self.event_instance_id = kwargs['event_instance_id'] + self.event_instance_id = kwargs.get('event_instance_id', None) self.category = kwargs.get('category', None) - self.time_stamp = kwargs['time_stamp'] + self.time_stamp = kwargs.get('time_stamp', None) self.has_correlated_events = 
kwargs.get('has_correlated_events', None) + self.kind = None class ApplicationEvent(FabricEvent): """Represents the base for all Application Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationContainerInstanceExitedEvent, ApplicationCreatedEvent, ApplicationDeletedEvent, ApplicationHealthReportExpiredEvent, ApplicationNewHealthReportEvent, ApplicationProcessExitedEvent, ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, ApplicationUpgradeRollbackCompletedEvent, ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, ChaosCodePackageRestartScheduledEvent, DeployedApplicationHealthReportExpiredEvent, DeployedApplicationNewHealthReportEvent, DeployedServicePackageHealthReportExpiredEvent, DeployedServicePackageNewHealthReportEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", 
"ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: ApplicationCreatedEvent, ApplicationDeletedEvent, + ApplicationNewHealthReportEvent, ApplicationHealthReportExpiredEvent, + ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, + ApplicationUpgradeRollbackCompletedEvent, + ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, + DeployedApplicationNewHealthReportEvent, + DeployedApplicationHealthReportExpiredEvent, ApplicationProcessExitedEvent, + ApplicationContainerInstanceExitedEvent, + DeployedServicePackageNewHealthReportEvent, + DeployedServicePackageHealthReportExpiredEvent, + ChaosCodePackageRestartScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. 
:type application_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent'} + 'kind': {'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationNewHealthReport': 
'ApplicationNewHealthReportEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationEvent, self).__init__(**kwargs) - self.kind = 'ApplicationEvent' # type: str - self.application_id = kwargs['application_id'] + self.application_id = kwargs.get('application_id', None) + self.kind = 'ApplicationEvent' class ApplicationContainerInstanceExitedEvent(ApplicationEvent): @@ -602,50 +545,32 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service package. + :param service_package_activation_id: Required. Activation Id of Service + package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. 
:type is_exclusive: bool @@ -661,16 +586,17 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. :type exit_code: long - :param unexpected_termination: Required. Indicates if termination is unexpected. + :param unexpected_termination: Required. Indicates if termination is + unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: ~datetime.datetime + :type start_time: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -687,11 +613,11 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -707,24 +633,21 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationContainerInstanceExitedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationContainerInstanceExited' # type: str - self.service_name = kwargs['service_name'] - self.service_package_name = kwargs['service_package_name'] - self.service_package_activation_id = kwargs['service_package_activation_id'] - self.is_exclusive = kwargs['is_exclusive'] - 
self.code_package_name = kwargs['code_package_name'] - self.entry_point_type = kwargs['entry_point_type'] - self.image_name = kwargs['image_name'] - self.container_name = kwargs['container_name'] - self.host_id = kwargs['host_id'] - self.exit_code = kwargs['exit_code'] - self.unexpected_termination = kwargs['unexpected_termination'] - self.start_time = kwargs['start_time'] + self.service_name = kwargs.get('service_name', None) + self.service_package_name = kwargs.get('service_package_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.is_exclusive = kwargs.get('is_exclusive', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.entry_point_type = kwargs.get('entry_point_type', None) + self.image_name = kwargs.get('image_name', None) + self.container_name = kwargs.get('container_name', None) + self.host_id = kwargs.get('host_id', None) + self.exit_code = kwargs.get('exit_code', None) + self.unexpected_termination = kwargs.get('unexpected_termination', None) + self.start_time = kwargs.get('start_time', None) + self.kind = 'ApplicationContainerInstanceExited' class ApplicationCreatedEvent(ApplicationEvent): @@ -732,44 +655,25 @@ class ApplicationCreatedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -780,9 +684,9 @@ class ApplicationCreatedEvent(ApplicationEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -790,26 +694,23 @@ class ApplicationCreatedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationCreatedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationCreated' # type: str - self.application_type_name = kwargs['application_type_name'] - self.application_type_version = kwargs['application_type_version'] - self.application_definition_kind = kwargs['application_definition_kind'] + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.application_definition_kind = kwargs.get('application_definition_kind', None) + self.kind = 'ApplicationCreated' class ApplicationDeletedEvent(ApplicationEvent): @@ -817,44 +718,25 @@ class ApplicationDeletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -863,60 +745,62 @@ class ApplicationDeletedEvent(ApplicationEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationDeletedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationDeleted' # type: str - self.application_type_name = kwargs['application_type_name'] - self.application_type_version = kwargs['application_type_version'] + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.kind = 'ApplicationDeleted' -class ApplicationDescription(msrest.serialization.Model): +class ApplicationDescription(Model): """Describes a Service Fabric application. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the 'fabric:' URI scheme. + :param name: Required. The name of the application, including the + 'fabric:' URI scheme. :type name: str - :param type_name: Required. The application type name as defined in the application manifest. - :type type_name: str - :param type_version: Required. 
The version of the application type as defined in the + :param type_name: Required. The application type name as defined in the application manifest. + :type type_name: str + :param type_version: Required. The version of the application type as + defined in the application manifest. :type type_version: str - :param parameter_list: List of application parameters with overridden values from their default - values specified in the application manifest. - :type parameter_list: list[~azure.servicefabric.models.ApplicationParameter] - :param application_capacity: Describes capacity information for services of this application. - This description can be used for describing the following. - - - * Reserving the capacity for the services on the nodes - * Limiting the total number of nodes that services of this application can run on - * Limiting the custom capacity metrics to limit the total consumption of this metric by the - services of this application. - :type application_capacity: ~azure.servicefabric.models.ApplicationCapacityDescription - :param managed_application_identity: Managed application identity description. + :param parameter_list: List of application parameters with overridden + values from their default values specified in the application manifest. + :type parameter_list: + list[~azure.servicefabric.models.ApplicationParameter] + :param application_capacity: Describes capacity information for services + of this application. This description can be used for describing the + following. + - Reserving the capacity for the services on the nodes + - Limiting the total number of nodes that services of this application can + run on + - Limiting the custom capacity metrics to limit the total consumption of + this metric by the services of this application + :type application_capacity: + ~azure.servicefabric.models.ApplicationCapacityDescription + :param managed_application_identity: Managed application identity + description. 
:type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -936,36 +820,36 @@ class ApplicationDescription(msrest.serialization.Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationDescription, self).__init__(**kwargs) - self.name = kwargs['name'] - self.type_name = kwargs['type_name'] - self.type_version = kwargs['type_version'] + self.name = kwargs.get('name', None) + self.type_name = kwargs.get('type_name', None) + self.type_version = kwargs.get('type_version', None) self.parameter_list = kwargs.get('parameter_list', None) self.application_capacity = kwargs.get('application_capacity', None) self.managed_application_identity = kwargs.get('managed_application_identity', None) -class EntityHealth(msrest.serialization.Model): - """Health information common to all entities in the cluster. It contains the aggregated health state, health events and unhealthy evaluation. +class EntityHealth(Model): + """Health information common to all entities in the cluster. It contains the + aggregated health state, health events and unhealthy evaluation. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). 
+ The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics """ @@ -976,10 +860,7 @@ class EntityHealth(msrest.serialization.Model): 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EntityHealth, self).__init__(**kwargs) self.aggregated_health_state = kwargs.get('aggregated_health_state', None) self.health_events = kwargs.get('health_events', None) @@ -988,29 +869,36 @@ def __init__( class ApplicationHealth(EntityHealth): - """Represents the health of the application. Contains the application aggregated health state and the service and deployed application health states. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). 
- The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Represents the health of the application. Contains the application + aggregated health state and the service and deployed application health + states. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str - :param service_health_states: Service health states as found in the health store. 
- :type service_health_states: list[~azure.servicefabric.models.ServiceHealthState] - :param deployed_application_health_states: Deployed application health states as found in the - health store. + :param service_health_states: Service health states as found in the health + store. + :type service_health_states: + list[~azure.servicefabric.models.ServiceHealthState] + :param deployed_application_health_states: Deployed application health + states as found in the health store. :type deployed_application_health_states: list[~azure.servicefabric.models.DeployedApplicationHealthState] """ @@ -1025,41 +913,44 @@ class ApplicationHealth(EntityHealth): 'deployed_application_health_states': {'key': 'DeployedApplicationHealthStates', 'type': '[DeployedApplicationHealthState]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.service_health_states = kwargs.get('service_health_states', None) self.deployed_application_health_states = kwargs.get('deployed_application_health_states', None) -class HealthEvaluation(msrest.serialization.Model): - """Represents a health evaluation which describes the data and the algorithm used by health manager to evaluate the health of an entity. +class HealthEvaluation(Model): + """Represents a health evaluation which describes the data and the algorithm + used by health manager to evaluate the health of an entity. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ApplicationHealthEvaluation, ApplicationTypeApplicationsHealthEvaluation, ApplicationsHealthEvaluation, DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, DeployedApplicationsHealthEvaluation, DeployedServicePackageHealthEvaluation, DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, NodeHealthEvaluation, NodeTypeNodesHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, PartitionsHealthEvaluation, ReplicaHealthEvaluation, ReplicasHealthEvaluation, ServiceHealthEvaluation, ServicesHealthEvaluation, SystemApplicationHealthEvaluation, UpgradeDomainDeltaNodesCheckHealthEvaluation, UpgradeDomainNodesHealthEvaluation. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + sub-classes are: ApplicationHealthEvaluation, ApplicationsHealthEvaluation, + ApplicationTypeApplicationsHealthEvaluation, + DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, + DeployedApplicationsHealthEvaluation, + DeployedServicePackageHealthEvaluation, + DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, + NodeHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, + PartitionsHealthEvaluation, ReplicaHealthEvaluation, + ReplicasHealthEvaluation, ServiceHealthEvaluation, + ServicesHealthEvaluation, SystemApplicationHealthEvaluation, + UpgradeDomainDeltaNodesCheckHealthEvaluation, + UpgradeDomainNodesHealthEvaluation, NodeTypeNodesHealthEvaluation + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -1067,53 +958,49 @@ class HealthEvaluation(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'NodeTypeNodes': 'NodeTypeNodesHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} + 'kind': {'Application': 'ApplicationHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 
'Node': 'NodeHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation', 'NodeTypeNodes': 'NodeTypeNodesHealthEvaluation'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HealthEvaluation, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.aggregated_health_state = kwargs.get('aggregated_health_state', None) self.description = kwargs.get('description', None) + self.kind = None class ApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for an application, containing information about the data and the algorithm used by the health store to evaluate health. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for an application, containing information + about the data and the algorithm used by the health store to evaluate + health. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the application. The types of the unhealthy evaluations can be - DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the application. 
The types of the + unhealthy evaluations can be DeployedApplicationsHealthEvaluation, + ServicesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -1121,28 +1008,27 @@ class ApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Application' # type: str self.application_name = kwargs.get('application_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Application' -class ApplicationHealthPolicies(msrest.serialization.Model): - """Defines the application health policy map used to evaluate the health of an application or one of its children entities. +class ApplicationHealthPolicies(Model): + """Defines the application health policy map used to evaluate the health of an + application or one of its children entities. - :param application_health_policy_map: The wrapper that contains the map with application health - policies used to evaluate specific applications in the cluster. + :param application_health_policy_map: The wrapper that contains the map + with application health policies used to evaluate specific applications in + the cluster. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] """ @@ -1151,34 +1037,36 @@ class ApplicationHealthPolicies(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) -class ApplicationHealthPolicy(msrest.serialization.Model): - """Defines a health policy used to evaluate the health of an application or one of its children entities. +class ApplicationHealthPolicy(Model): + """Defines a health policy used to evaluate the health of an application or + one of its children entities. - :param consider_warning_as_error: Indicates whether warnings are treated with the same severity - as errors. + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. Default value: False . :type consider_warning_as_error: bool - :param max_percent_unhealthy_deployed_applications: The maximum allowed percentage of unhealthy - deployed applications. Allowed values are Byte values from zero to 100. - The percentage represents the maximum tolerated percentage of deployed applications that can - be unhealthy before the application is considered in error. - This is calculated by dividing the number of unhealthy deployed applications over the number - of nodes where the application is currently deployed on in the cluster. - The computation rounds up to tolerate one failure on small numbers of nodes. Default - percentage is zero. + :param max_percent_unhealthy_deployed_applications: The maximum allowed + percentage of unhealthy deployed applications. Allowed values are Byte + values from zero to 100. 
+ The percentage represents the maximum tolerated percentage of deployed + applications that can be unhealthy before the application is considered in + error. + This is calculated by dividing the number of unhealthy deployed + applications over the number of nodes where the application is currently + deployed on in the cluster. + The computation rounds up to tolerate one failure on small numbers of + nodes. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_deployed_applications: int - :param default_service_type_health_policy: The health policy used by default to evaluate the - health of a service type. - :type default_service_type_health_policy: ~azure.servicefabric.models.ServiceTypeHealthPolicy - :param service_type_health_policy_map: The map with service type health policy per service type - name. The map is empty by default. + :param default_service_type_health_policy: The health policy used by + default to evaluate the health of a service type. + :type default_service_type_health_policy: + ~azure.servicefabric.models.ServiceTypeHealthPolicy + :param service_type_health_policy_map: The map with service type health + policy per service type name. The map is empty by default. 
:type service_type_health_policy_map: list[~azure.servicefabric.models.ServiceTypeHealthPolicyMapItem] """ @@ -1190,10 +1078,7 @@ class ApplicationHealthPolicy(msrest.serialization.Model): 'service_type_health_policy_map': {'key': 'ServiceTypeHealthPolicyMap', 'type': '[ServiceTypeHealthPolicyMapItem]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False) self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', 0) @@ -1201,16 +1086,16 @@ def __init__( self.service_type_health_policy_map = kwargs.get('service_type_health_policy_map', None) -class ApplicationHealthPolicyMapItem(msrest.serialization.Model): +class ApplicationHealthPolicyMapItem(Model): """Defines an item in ApplicationHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application health policy map item. This is the name of - the application. + :param key: Required. The key of the application health policy map item. + This is the name of the application. :type key: str - :param value: Required. The value of the application health policy map item. This is the - ApplicationHealthPolicy for this application. + :param value: Required. The value of the application health policy map + item. This is the ApplicationHealthPolicy for this application. 
:type value: ~azure.servicefabric.models.ApplicationHealthPolicy """ @@ -1224,25 +1109,24 @@ class ApplicationHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'ApplicationHealthPolicy'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) -class ApplicationHealthPolicyMapObject(msrest.serialization.Model): - """Represents the map of application health policies for a ServiceFabric cluster upgrade. +class ApplicationHealthPolicyMapObject(Model): + """Represents the map of application health policies for a ServiceFabric + cluster upgrade. - :param application_health_policy_map: Defines a map that contains specific application health - policies for different applications. - Each entry specifies as key the application name and as value an ApplicationHealthPolicy used - to evaluate the application health. - If an application is not specified in the map, the application health evaluation uses the - ApplicationHealthPolicy found in its application manifest or the default application health - policy (if no health policy is defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific + application health policies for different applications. + Each entry specifies as key the application name and as value an + ApplicationHealthPolicy used to evaluate the application health. + If an application is not specified in the map, the application health + evaluation uses the ApplicationHealthPolicy found in its application + manifest or the default application health policy (if no health policy is + defined in the manifest). The map is empty by default. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] @@ -1252,10 +1136,7 @@ class ApplicationHealthPolicyMapObject(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthPolicyMapObject, self).__init__(**kwargs) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) @@ -1265,44 +1146,25 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1318,16 +1180,17 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1341,11 +1204,11 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1358,52 +1221,53 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def 
__init__(self, **kwargs): super(ApplicationHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'ApplicationHealthReportExpired' # type: str - self.application_instance_id = kwargs['application_instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.application_instance_id = kwargs.get('application_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ApplicationHealthReportExpired' -class EntityHealthState(msrest.serialization.Model): - """A base type for the health state of various entities in the cluster. It contains the aggregated health state. +class EntityHealthState(Model): + """A base type for the health state of various entities in the cluster. It + contains the aggregated health state. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState """ _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EntityHealthState, self).__init__(**kwargs) self.aggregated_health_state = kwargs.get('aggregated_health_state', None) class ApplicationHealthState(EntityHealthState): - """Represents the health state of an application, which contains the application identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param name: The name of the application, including the 'fabric:' URI scheme. + """Represents the health state of an application, which contains the + application identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str """ @@ -1412,20 +1276,18 @@ class ApplicationHealthState(EntityHealthState): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthState, self).__init__(**kwargs) self.name = kwargs.get('name', None) -class EntityHealthStateChunk(msrest.serialization.Model): - """A base type for the health state chunk of various entities in the cluster. 
It contains the aggregated health state. +class EntityHealthStateChunk(Model): + """A base type for the health state chunk of various entities in the cluster. + It contains the aggregated health state. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -1433,31 +1295,35 @@ class EntityHealthStateChunk(msrest.serialization.Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EntityHealthStateChunk, self).__init__(**kwargs) self.health_state = kwargs.get('health_state', None) class ApplicationHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a application. -The application health state chunk contains the application name, its aggregated health state and any children services and deployed applications that respect the filters in cluster health chunk query description. + The application health state chunk contains the application name, its + aggregated health state and any children services and deployed applications + that respect the filters in cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param application_type_name: The application type name as defined in the application manifest. + :param application_type_name: The application type name as defined in the + application manifest. :type application_type_name: str - :param service_health_state_chunks: The list of service health state chunks in the cluster that - respect the filters in the cluster health chunk query description. - :type service_health_state_chunks: ~azure.servicefabric.models.ServiceHealthStateChunkList - :param deployed_application_health_state_chunks: The list of deployed application health state - chunks in the cluster that respect the filters in the cluster health chunk query description. + :param service_health_state_chunks: The list of service health state + chunks in the cluster that respect the filters in the cluster health chunk + query description. + :type service_health_state_chunks: + ~azure.servicefabric.models.ServiceHealthStateChunkList + :param deployed_application_health_state_chunks: The list of deployed + application health state chunks in the cluster that respect the filters in + the cluster health chunk query description. 
:type deployed_application_health_state_chunks: ~azure.servicefabric.models.DeployedApplicationHealthStateChunkList """ @@ -1470,10 +1336,7 @@ class ApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_application_health_state_chunks': {'key': 'DeployedApplicationHealthStateChunks', 'type': 'DeployedApplicationHealthStateChunkList'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthStateChunk, self).__init__(**kwargs) self.application_name = kwargs.get('application_name', None) self.application_type_name = kwargs.get('application_type_name', None) @@ -1481,11 +1344,12 @@ def __init__( self.deployed_application_health_state_chunks = kwargs.get('deployed_application_health_state_chunks', None) -class EntityHealthStateChunkList(msrest.serialization.Model): - """A base type for the list of health state chunks found in the cluster. It contains the total number of health states that match the input filters. +class EntityHealthStateChunkList(Model): + """A base type for the list of health state chunks found in the cluster. It + contains the total number of health states that match the input filters. - :param total_count: Total number of entity health state objects that match the specified - filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. 
:type total_count: long """ @@ -1493,22 +1357,21 @@ class EntityHealthStateChunkList(msrest.serialization.Model): 'total_count': {'key': 'TotalCount', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EntityHealthStateChunkList, self).__init__(**kwargs) self.total_count = kwargs.get('total_count', None) class ApplicationHealthStateChunkList(EntityHealthStateChunkList): - """The list of application health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. + """The list of application health state chunks in the cluster that respect the + input filters in the chunk query. Returned by get cluster health state + chunks query. - :param total_count: Total number of entity health state objects that match the specified - filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. :type total_count: long - :param items: The list of application health state chunks that respect the input filters in the - chunk query. + :param items: The list of application health state chunks that respect the + input filters in the chunk query. :type items: list[~azure.servicefabric.models.ApplicationHealthStateChunk] """ @@ -1517,78 +1380,87 @@ class ApplicationHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[ApplicationHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class ApplicationHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a application should be included in the cluster health chunk. -One filter can match zero, one or multiple applications, depending on its properties. 
+class ApplicationHealthStateFilter(Model): + """Defines matching criteria to determine whether a application should be + included in the cluster health chunk. + One filter can match zero, one or multiple applications, depending on its + properties. - :param application_name_filter: The name of the application that matches the filter, as a - fabric uri. The filter is applied only to the specified application, if it exists. - If the application doesn't exist, no application is returned in the cluster health chunk based - on this filter. - If the application exists, it is included in the cluster health chunk if it respects the other - filter properties. - If not specified, all applications are matched against the other filter members, like health - state filter. - :type application_name_filter: str - :param application_type_name_filter: The name of the application type that matches the filter. - If specified, the filter is applied only to applications of the selected application type, if - any exists. - If no applications of the specified application type exists, no application is returned in the + :param application_name_filter: The name of the application that matches + the filter, as a fabric uri. The filter is applied only to the specified + application, if it exists. + If the application doesn't exist, no application is returned in the cluster health chunk based on this filter. - Each application of the specified application type is included in the cluster health chunk if + If the application exists, it is included in the cluster health chunk if it respects the other filter properties. - If not specified, all applications are matched against the other filter members, like health - state filter. + If not specified, all applications are matched against the other filter + members, like health state filter. + :type application_name_filter: str + :param application_type_name_filter: The name of the application type that + matches the filter. 
+ If specified, the filter is applied only to applications of the selected + application type, if any exists. + If no applications of the specified application type exists, no + application is returned in the cluster health chunk based on this filter. + Each application of the specified application type is included in the + cluster health chunk if it respects the other filter properties. + If not specified, all applications are matched against the other filter + members, like health state filter. :type application_type_name_filter: str - :param health_state_filter: The filter for the health state of the applications. It allows - selecting applications if they match the desired health states. - The possible values are integer value of one of the following health states. Only applications - that match the filter are returned. All applications are used to evaluate the cluster - aggregated health state. - If not specified, default value is None, unless the application name or the application type - name are specified. If the filter has default value and application name is specified, the - matching application is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches applications with HealthState value of OK - (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
+ :param health_state_filter: The filter for the health state of the + applications. It allows selecting applications if they match the desired + health states. + The possible values are integer value of one of the following health + states. Only applications that match the filter are returned. All + applications are used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the application name or + the application type name are specified. If the filter has default value + and application name is specified, the matching application is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches applications with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param service_filters: Defines a list of filters that specify which services to be included in - the returned cluster health chunk as children of the application. The services are returned - only if the parent application matches a filter. - If the list is empty, no services are returned. All the services are used to evaluate the - parent application aggregated health state, regardless of the input filters. 
+ :param service_filters: Defines a list of filters that specify which + services to be included in the returned cluster health chunk as children + of the application. The services are returned only if the parent + application matches a filter. + If the list is empty, no services are returned. All the services are used + to evaluate the parent application aggregated health state, regardless of + the input filters. The application filter may specify multiple service filters. - For example, it can specify a filter to return all services with health state Error and - another filter to always include a service identified by its service name. - :type service_filters: list[~azure.servicefabric.models.ServiceHealthStateFilter] - :param deployed_application_filters: Defines a list of filters that specify which deployed - applications to be included in the returned cluster health chunk as children of the - application. The deployed applications are returned only if the parent application matches a - filter. - If the list is empty, no deployed applications are returned. All the deployed applications are - used to evaluate the parent application aggregated health state, regardless of the input - filters. + For example, it can specify a filter to return all services with health + state Error and another filter to always include a service identified by + its service name. + :type service_filters: + list[~azure.servicefabric.models.ServiceHealthStateFilter] + :param deployed_application_filters: Defines a list of filters that + specify which deployed applications to be included in the returned cluster + health chunk as children of the application. The deployed applications are + returned only if the parent application matches a filter. + If the list is empty, no deployed applications are returned. All the + deployed applications are used to evaluate the parent application + aggregated health state, regardless of the input filters. 
The application filter may specify multiple deployed application filters. - For example, it can specify a filter to return all deployed applications with health state - Error and another filter to always include a deployed application on a specified node. + For example, it can specify a filter to return all deployed applications + with health state Error and another filter to always include a deployed + application on a specified node. :type deployed_application_filters: list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter] """ @@ -1601,10 +1473,7 @@ class ApplicationHealthStateFilter(msrest.serialization.Model): 'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationHealthStateFilter, self).__init__(**kwargs) self.application_name_filter = kwargs.get('application_name_filter', None) self.application_type_name_filter = kwargs.get('application_type_name_filter', None) @@ -1613,36 +1482,43 @@ def __init__( self.deployed_application_filters = kwargs.get('deployed_application_filters', None) -class ApplicationInfo(msrest.serialization.Model): +class ApplicationInfo(Model): """Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str - :param type_name: The application type name as defined in the application manifest. - :type type_name: str - :param type_version: The version of the application type as defined in the application + :param type_name: The application type name as defined in the application manifest. + :type type_name: str + :param type_version: The version of the application type as defined in the + application manifest. :type type_version: str - :param status: The status of the application. Possible values include: "Invalid", "Ready", - "Upgrading", "Creating", "Deleting", "Failed". + :param status: The status of the application. Possible values include: + 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :type status: str or ~azure.servicefabric.models.ApplicationStatus - :param parameters: List of application parameters with overridden values from their default - values specified in the application manifest. + :param parameters: List of application parameters with overridden values + from their default values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param application_definition_kind: The mechanism used to define a Service Fabric application. - Possible values include: "Invalid", "ServiceFabricApplicationDescription", "Compose". - :type application_definition_kind: str or ~azure.servicefabric.models.ApplicationDefinitionKind - :param managed_application_identity: Managed application identity description. + :param application_definition_kind: The mechanism used to define a Service + Fabric application. Possible values include: 'Invalid', + 'ServiceFabricApplicationDescription', 'Compose' + :type application_definition_kind: str or + ~azure.servicefabric.models.ApplicationDefinitionKind + :param managed_application_identity: Managed application identity + description. :type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -1659,10 +1535,7 @@ class ApplicationInfo(msrest.serialization.Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -1675,28 +1548,37 @@ def __init__( self.managed_application_identity = kwargs.get('managed_application_identity', None) -class ApplicationLoadInfo(msrest.serialization.Model): +class ApplicationLoadInfo(Model): """Load Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str :param minimum_nodes: The minimum number of nodes for this application. - It is the number of nodes where Service Fabric will reserve Capacity in the cluster which - equals to ReservedLoad * MinimumNodes for this Application instance. - For applications that do not have application capacity defined this value will be zero. + It is the number of nodes where Service Fabric will reserve Capacity in + the cluster which equals to ReservedLoad * MinimumNodes for this + Application instance. + For applications that do not have application capacity defined this value + will be zero. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where this application can be instantiated. + :param maximum_nodes: The maximum number of nodes where this application + can be instantiated. It is the number of nodes this application is allowed to span. - For applications that do not have application capacity defined this value will be zero. + For applications that do not have application capacity defined this value + will be zero. :type maximum_nodes: long - :param node_count: The number of nodes on which this application is instantiated. - For applications that do not have application capacity defined this value will be zero. + :param node_count: The number of nodes on which this application is + instantiated. 
+ For applications that do not have application capacity defined this value + will be zero. :type node_count: long - :param application_load_metric_information: List of application load metric information. + :param application_load_metric_information: List of application load + metric information. :type application_load_metric_information: list[~azure.servicefabric.models.ApplicationLoadMetricInformation] """ @@ -1709,10 +1591,7 @@ class ApplicationLoadInfo(msrest.serialization.Model): 'application_load_metric_information': {'key': 'ApplicationLoadMetricInformation', 'type': '[ApplicationLoadMetricInformation]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationLoadInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.minimum_nodes = kwargs.get('minimum_nodes', None) @@ -1721,20 +1600,26 @@ def __init__( self.application_load_metric_information = kwargs.get('application_load_metric_information', None) -class ApplicationLoadMetricInformation(msrest.serialization.Model): - """Describes load information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. +class ApplicationLoadMetricInformation(Model): + """Describes load information for a custom resource balancing metric. This can + be used to limit the total consumption of this metric by the services of + this application. :param name: The name of the metric. :type name: str - :param reservation_capacity: This is the capacity reserved in the cluster for the application. + :param reservation_capacity: This is the capacity reserved in the cluster + for the application. It's the product of NodeReservationCapacity and MinimumNodes. If set to zero, no capacity is reserved for this metric. - When setting application capacity or when updating application capacity this value must be - smaller than or equal to MaximumCapacity for each metric. 
+ When setting application capacity or when updating application capacity + this value must be smaller than or equal to MaximumCapacity for each + metric. :type reservation_capacity: long - :param application_capacity: Total capacity for this metric in this application instance. + :param application_capacity: Total capacity for this metric in this + application instance. :type application_capacity: long - :param application_load: Current load for this metric in this application instance. + :param application_load: Current load for this metric in this application + instance. :type application_load: long """ @@ -1745,10 +1630,7 @@ class ApplicationLoadMetricInformation(msrest.serialization.Model): 'application_load': {'key': 'ApplicationLoad', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationLoadMetricInformation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.reservation_capacity = kwargs.get('reservation_capacity', None) @@ -1756,35 +1638,46 @@ def __init__( self.application_load = kwargs.get('application_load', None) -class ApplicationMetricDescription(msrest.serialization.Model): - """Describes capacity information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. +class ApplicationMetricDescription(Model): + """Describes capacity information for a custom resource balancing metric. This + can be used to limit the total consumption of this metric by the services + of this application. :param name: The name of the metric. :type name: str - :param maximum_capacity: The maximum node capacity for Service Fabric application. - This is the maximum Load for an instance of this application on a single node. Even if the - capacity of node is greater than this value, Service Fabric will limit the total load of - services within the application on each node to this value. 
+ :param maximum_capacity: The maximum node capacity for Service Fabric + application. + This is the maximum Load for an instance of this application on a single + node. Even if the capacity of node is greater than this value, Service + Fabric will limit the total load of services within the application on + each node to this value. If set to zero, capacity for this metric is unlimited on each node. - When creating a new application with application capacity defined, the product of MaximumNodes - and this value must always be smaller than or equal to TotalApplicationCapacity. - When updating existing application with application capacity, the product of MaximumNodes and - this value must always be smaller than or equal to TotalApplicationCapacity. + When creating a new application with application capacity defined, the + product of MaximumNodes and this value must always be smaller than or + equal to TotalApplicationCapacity. + When updating existing application with application capacity, the product + of MaximumNodes and this value must always be smaller than or equal to + TotalApplicationCapacity. :type maximum_capacity: long - :param reservation_capacity: The node reservation capacity for Service Fabric application. - This is the amount of load which is reserved on nodes which have instances of this - application. - If MinimumNodes is specified, then the product of these values will be the capacity reserved - in the cluster for the application. + :param reservation_capacity: The node reservation capacity for Service + Fabric application. + This is the amount of load which is reserved on nodes which have instances + of this application. + If MinimumNodes is specified, then the product of these values will be the + capacity reserved in the cluster for the application. If set to zero, no capacity is reserved for this metric. 
- When setting application capacity or when updating application capacity; this value must be - smaller than or equal to MaximumCapacity for each metric. + When setting application capacity or when updating application capacity; + this value must be smaller than or equal to MaximumCapacity for each + metric. :type reservation_capacity: long - :param total_application_capacity: The total metric capacity for Service Fabric application. - This is the total metric capacity for this application in the cluster. Service Fabric will try - to limit the sum of loads of services within the application to this value. - When creating a new application with application capacity defined, the product of MaximumNodes - and MaximumCapacity must always be smaller than or equal to this value. + :param total_application_capacity: The total metric capacity for Service + Fabric application. + This is the total metric capacity for this application in the cluster. + Service Fabric will try to limit the sum of loads of services within the + application to this value. + When creating a new application with application capacity defined, the + product of MaximumNodes and MaximumCapacity must always be smaller than or + equal to this value. :type total_application_capacity: long """ @@ -1795,10 +1688,7 @@ class ApplicationMetricDescription(msrest.serialization.Model): 'total_application_capacity': {'key': 'TotalApplicationCapacity', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationMetricDescription, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.maximum_capacity = kwargs.get('maximum_capacity', None) @@ -1806,16 +1696,19 @@ def __init__( self.total_application_capacity = kwargs.get('total_application_capacity', None) -class ApplicationNameInfo(msrest.serialization.Model): +class ApplicationNameInfo(Model): """Information about the application name. - :param id: The identity of the application. 
This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str """ @@ -1824,10 +1717,7 @@ class ApplicationNameInfo(msrest.serialization.Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationNameInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -1838,44 +1728,25 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1891,16 +1762,17 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1914,11 +1786,11 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1931,25 +1803,23 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'ApplicationNewHealthReport' # type: str - self.application_instance_id = kwargs['application_instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.application_instance_id = kwargs.get('application_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', 
None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ApplicationNewHealthReport' -class ApplicationParameter(msrest.serialization.Model): - """Describes an application parameter override to be applied when creating or upgrading an application. +class ApplicationParameter(Model): + """Describes an application parameter override to be applied when creating or + upgrading an application. All required parameters must be populated in order to send to Azure. @@ -1969,13 +1839,10 @@ class ApplicationParameter(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationParameter, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) class ApplicationProcessExitedEvent(ApplicationEvent): @@ -1983,50 +1850,32 @@ class ApplicationProcessExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service package. + :param service_package_activation_id: Required. Activation Id of Service + package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. 
:type is_exclusive: bool @@ -2042,16 +1891,17 @@ class ApplicationProcessExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. :type exit_code: long - :param unexpected_termination: Required. Indicates if termination is unexpected. + :param unexpected_termination: Required. Indicates if termination is + unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: ~datetime.datetime + :type start_time: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -2068,11 +1918,11 @@ class ApplicationProcessExitedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -2088,62 +1938,64 @@ class ApplicationProcessExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationProcessExitedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationProcessExited' # type: str - self.service_name = kwargs['service_name'] - self.service_package_name = kwargs['service_package_name'] - self.service_package_activation_id = kwargs['service_package_activation_id'] - self.is_exclusive = kwargs['is_exclusive'] - self.code_package_name = 
kwargs['code_package_name'] - self.entry_point_type = kwargs['entry_point_type'] - self.exe_name = kwargs['exe_name'] - self.process_id = kwargs['process_id'] - self.host_id = kwargs['host_id'] - self.exit_code = kwargs['exit_code'] - self.unexpected_termination = kwargs['unexpected_termination'] - self.start_time = kwargs['start_time'] - - -class ApplicationResourceDescription(msrest.serialization.Model): + self.service_name = kwargs.get('service_name', None) + self.service_package_name = kwargs.get('service_package_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.is_exclusive = kwargs.get('is_exclusive', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.entry_point_type = kwargs.get('entry_point_type', None) + self.exe_name = kwargs.get('exe_name', None) + self.process_id = kwargs.get('process_id', None) + self.host_id = kwargs.get('host_id', None) + self.exit_code = kwargs.get('exit_code', None) + self.unexpected_termination = kwargs.get('unexpected_termination', None) + self.start_time = kwargs.get('start_time', None) + self.kind = 'ApplicationProcessExited' + + +class ApplicationResourceDescription(Model): """This type describes a application resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the Application resource. :type name: str - :param identity: Describes the identity of the application. - :type identity: ~azure.servicefabric.models.IdentityDescription :param description: User readable description of the application. :type description: str - :param services: Describes the services in the application. This property is used to create or - modify services of the application. On get only the name of the service is returned. 
The - service description can be obtained by querying for the service resource. - :type services: list[~azure.servicefabric.models.ServiceResourceDescription] - :param diagnostics: Describes the diagnostics definition and usage for an application resource. + :param services: Describes the services in the application. This property + is used to create or modify services of the application. On get only the + name of the service is returned. The service description can be obtained + by querying for the service resource. + :type services: + list[~azure.servicefabric.models.ServiceResourceDescription] + :param diagnostics: Describes the diagnostics definition and usage for an + application resource. :type diagnostics: ~azure.servicefabric.models.DiagnosticsDescription - :param debug_params: Internal - used by Visual Studio to setup the debugging session on the - local development environment. + :param debug_params: Internal - used by Visual Studio to setup the + debugging session on the local development environment. :type debug_params: str :ivar service_names: Names of the services in the application. :vartype service_names: list[str] - :ivar status: Status of the application. Possible values include: "Unknown", "Ready", - "Upgrading", "Creating", "Deleting", "Failed". + :ivar status: Status of the application. Possible values include: + 'Unknown', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the application. + :ivar status_details: Gives additional information about the current + status of the application. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :ivar health_state: Describes the health state of an application resource. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the application's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the application is marked - unhealthy. + :ivar unhealthy_evaluation: When the application's health state is not + 'Ok', this additional details from service fabric Health Manager for the + user to know why the application is marked unhealthy. :vartype unhealthy_evaluation: str + :param identity: Describes the identity of the application. + :type identity: ~azure.servicefabric.models.IdentityDescription """ _validation = { @@ -2157,7 +2009,6 @@ class ApplicationResourceDescription(msrest.serialization.Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, - 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'services': {'key': 'properties.services', 'type': '[ServiceResourceDescription]'}, 'diagnostics': {'key': 'properties.diagnostics', 'type': 'DiagnosticsDescription'}, @@ -2167,15 +2018,12 @@ class ApplicationResourceDescription(msrest.serialization.Model): 'status_details': {'key': 'properties.statusDetails', 'type': 'str'}, 'health_state': {'key': 'properties.healthState', 'type': 'str'}, 'unhealthy_evaluation': {'key': 'properties.unhealthyEvaluation', 'type': 'str'}, + 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationResourceDescription, self).__init__(**kwargs) - self.name = kwargs['name'] - self.identity = kwargs.get('identity', None) + self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) self.services = kwargs.get('services', None) self.diagnostics = kwargs.get('diagnostics', None) @@ -2185,45 +2033,56 @@ def __init__( 
self.status_details = None self.health_state = None self.unhealthy_evaluation = None + self.identity = kwargs.get('identity', None) -class ApplicationResourceUpgradeProgressInfo(msrest.serialization.Model): +class ApplicationResourceUpgradeProgressInfo(Model): """This type describes an application resource upgrade. :param name: Name of the Application resource. :type name: str - :param target_application_type_version: The target application version for the application - upgrade. + :param target_application_type_version: The target application version for + the application upgrade. :type target_application_type_version: str - :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. :type start_timestamp_utc: str - :param upgrade_state: The state of the application resource upgrade. Possible values include: - "Invalid", "ProvisioningTarget", "RollingForward", "UnprovisioningCurrent", - "CompletedRollforward", "RollingBack", "UnprovisioningTarget", "CompletedRollback", "Failed". - :type upgrade_state: str or ~azure.servicefabric.models.ApplicationResourceUpgradeState - :param percent_completed: The estimated percent of replicas are completed in the upgrade. + :param upgrade_state: The state of the application resource upgrade. + Possible values include: 'Invalid', 'ProvisioningTarget', + 'RollingForward', 'UnprovisioningCurrent', 'CompletedRollforward', + 'RollingBack', 'UnprovisioningTarget', 'CompletedRollback', 'Failed' + :type upgrade_state: str or + ~azure.servicefabric.models.ApplicationResourceUpgradeState + :param percent_completed: The estimated percent of replicas are completed + in the upgrade. :type percent_completed: str :param service_upgrade_progress: List of service upgrade progresses. 
- :type service_upgrade_progress: list[~azure.servicefabric.models.ServiceUpgradeProgress] - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: "Monitored". - :type rolling_upgrade_mode: str or ~azure.servicefabric.models.RollingUpgradeMode - :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :type service_upgrade_progress: + list[~azure.servicefabric.models.ServiceUpgradeProgress] + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "Monitored" . + :type rolling_upgrade_mode: str or + ~azure.servicefabric.models.RollingUpgradeMode + :param upgrade_duration: The estimated amount of time that the overall + upgrade elapsed. It is first interpreted as a string representing an ISO + 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. Default value: "PT0H2M0S" . :type upgrade_duration: str - :param application_upgrade_status_details: Additional detailed information about the status of - the pending upgrade. + :param application_upgrade_status_details: Additional detailed information + about the status of the pending upgrade. :type application_upgrade_status_details: str - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. 
When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). Default value: 42949672925 . :type upgrade_replica_set_check_timeout_in_seconds: long - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and - FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. :type failure_timestamp_utc: str """ @@ -2241,10 +2100,7 @@ class ApplicationResourceUpgradeProgressInfo(msrest.serialization.Model): 'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationResourceUpgradeProgressInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.target_application_type_version = kwargs.get('target_application_type_version', None) @@ -2259,17 +2115,18 @@ def __init__( self.failure_timestamp_utc = kwargs.get('failure_timestamp_utc', None) -class VolumeReference(msrest.serialization.Model): +class VolumeReference(Model): """Describes a reference to a volume resource. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. Default is 'false'. 
+ :param read_only: The flag indicating whether the volume is read only. + Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which the volume should be - mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which + the volume should be mounted. Only valid path characters are allowed. :type destination_path: str """ @@ -2284,14 +2141,11 @@ class VolumeReference(msrest.serialization.Model): 'destination_path': {'key': 'destinationPath', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(VolumeReference, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.read_only = kwargs.get('read_only', None) - self.destination_path = kwargs['destination_path'] + self.destination_path = kwargs.get('destination_path', None) class ApplicationScopedVolume(VolumeReference): @@ -2301,13 +2155,14 @@ class ApplicationScopedVolume(VolumeReference): :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. Default is 'false'. + :param read_only: The flag indicating whether the volume is read only. + Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which the volume should be - mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which + the volume should be mounted. Only valid path characters are allowed. :type destination_path: str - :param creation_parameters: Required. Describes parameters for creating application-scoped - volumes. + :param creation_parameters: Required. Describes parameters for creating + application-scoped volumes. 
:type creation_parameters: ~azure.servicefabric.models.ApplicationScopedVolumeCreationParameters """ @@ -2325,27 +2180,24 @@ class ApplicationScopedVolume(VolumeReference): 'creation_parameters': {'key': 'creationParameters', 'type': 'ApplicationScopedVolumeCreationParameters'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationScopedVolume, self).__init__(**kwargs) - self.creation_parameters = kwargs['creation_parameters'] + self.creation_parameters = kwargs.get('creation_parameters', None) -class ApplicationScopedVolumeCreationParameters(msrest.serialization.Model): +class ApplicationScopedVolumeCreationParameters(Model): """Describes parameters for creating application-scoped volumes. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk. + sub-classes are: + ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. - Possible values include: "ServiceFabricVolumeDisk". - :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -2353,34 +2205,32 @@ class ApplicationScopedVolumeCreationParameters(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ServiceFabricVolumeDisk': 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationScopedVolumeCreationParameters, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.description = kwargs.get('description', None) + self.kind = None class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(ApplicationScopedVolumeCreationParameters): - """Describes parameters for creating application-scoped volumes provided by Service Fabric Volume Disks. + """Describes parameters for creating application-scoped volumes provided by + Service Fabric Volume Disks. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. - Possible values include: "ServiceFabricVolumeDisk". - :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str - :param size_disk: Required. Volume size. Possible values include: "Small", "Medium", "Large". + :param kind: Required. Constant filled by server. + :type kind: str + :param size_disk: Required. Volume size. 
Possible values include: 'Small', + 'Medium', 'Large' :type size_disk: str or ~azure.servicefabric.models.SizeTypes """ @@ -2390,51 +2240,45 @@ class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(Applicati } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, 'size_disk': {'key': 'sizeDisk', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk, self).__init__(**kwargs) - self.kind = 'ServiceFabricVolumeDisk' # type: str - self.size_disk = kwargs['size_disk'] + self.size_disk = kwargs.get('size_disk', None) + self.kind = 'ServiceFabricVolumeDisk' class ApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications, containing health evaluations for each unhealthy application that impacted current aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for applications, containing health + evaluations for each unhealthy application that impacted current aggregated + health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications - from the ClusterHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_applications: Maximum allowed percentage of + unhealthy applications from the ClusterHealthPolicy. :type max_percent_unhealthy_applications: int :param total_count: Total number of applications from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ApplicationHealthEvaluation that impacted the aggregated - health. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ApplicationHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2442,59 +2286,59 @@ class ApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationsHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Applications' # type: str self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Applications' class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications of a particular application type. The application type applications evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy application of the included application type that impacted current aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. 
The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for applications of a particular application + type. The application type applications evaluation can be returned when + cluster health evaluation returns unhealthy aggregated health state, either + Error or Warning. It contains health evaluations for each unhealthy + application of the included application type that impacted current + aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param application_type_name: The application type name as defined in the application manifest. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_type_name: The application type name as defined in the + application manifest. :type application_type_name: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications - for the application type, specified as an entry in ApplicationTypeHealthPolicyMap. + :param max_percent_unhealthy_applications: Maximum allowed percentage of + unhealthy applications for the application type, specified as an entry in + ApplicationTypeHealthPolicyMap. :type max_percent_unhealthy_applications: int - :param total_count: Total number of applications of the application type found in the health - store. + :param total_count: Total number of applications of the application type + found in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ApplicationHealthEvaluation of this application type that - impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ApplicationHealthEvaluation of this application type that impacted the + aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2502,38 +2346,36 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(**kwargs) - self.kind = 'ApplicationTypeApplications' # type: str self.application_type_name = kwargs.get('application_type_name', None) self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'ApplicationTypeApplications' -class ApplicationTypeHealthPolicyMapItem(msrest.serialization.Model): +class ApplicationTypeHealthPolicyMapItem(Model): """Defines an item in ApplicationTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application type health policy map item. This is the name - of the application type. + :param key: Required. The key of the application type health policy map + item. This is the name of the application type. :type key: str - :param value: Required. The value of the application type health policy map item. - The max percent unhealthy applications allowed for the application type. Must be between zero - and 100. 
+ :param value: Required. The value of the application type health policy + map item. + The max percent unhealthy applications allowed for the application type. + Must be between zero and 100. :type value: int """ @@ -2547,22 +2389,20 @@ class ApplicationTypeHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationTypeHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) -class ApplicationTypeImageStorePath(msrest.serialization.Model): - """Path description for the application package in the image store specified during the prior copy operation. +class ApplicationTypeImageStorePath(Model): + """Path description for the application package in the image store specified + during the prior copy operation. All required parameters must be populated in order to send to Azure. - :param application_type_build_path: Required. The relative image store path to the application - package. + :param application_type_build_path: Required. The relative image store + path to the application package. :type application_type_build_path: str """ @@ -2574,33 +2414,34 @@ class ApplicationTypeImageStorePath(msrest.serialization.Model): 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationTypeImageStorePath, self).__init__(**kwargs) - self.application_type_build_path = kwargs['application_type_build_path'] + self.application_type_build_path = kwargs.get('application_type_build_path', None) -class ApplicationTypeInfo(msrest.serialization.Model): +class ApplicationTypeInfo(Model): """Information about an application type. - :param name: The application type name as defined in the application manifest. 
+ :param name: The application type name as defined in the application + manifest. :type name: str - :param version: The version of the application type as defined in the application manifest. + :param version: The version of the application type as defined in the + application manifest. :type version: str - :param default_parameter_list: List of application type parameters that can be overridden when - creating or updating the application. - :type default_parameter_list: list[~azure.servicefabric.models.ApplicationParameter] - :param status: The status of the application type. Possible values include: "Invalid", - "Provisioning", "Available", "Unprovisioning", "Failed". + :param default_parameter_list: List of application type parameters that + can be overridden when creating or updating the application. + :type default_parameter_list: + list[~azure.servicefabric.models.ApplicationParameter] + :param status: The status of the application type. Possible values + include: 'Invalid', 'Provisioning', 'Available', 'Unprovisioning', + 'Failed' :type status: str or ~azure.servicefabric.models.ApplicationTypeStatus - :param status_details: Additional detailed information about the status of the application - type. + :param status_details: Additional detailed information about the status of + the application type. :type status_details: str - :param application_type_definition_kind: The mechanism used to define a Service Fabric - application type. Possible values include: "Invalid", "ServiceFabricApplicationPackage", - "Compose". + :param application_type_definition_kind: The mechanism used to define a + Service Fabric application type. 
Possible values include: 'Invalid', + 'ServiceFabricApplicationPackage', 'Compose' :type application_type_definition_kind: str or ~azure.servicefabric.models.ApplicationTypeDefinitionKind """ @@ -2614,10 +2455,7 @@ class ApplicationTypeInfo(msrest.serialization.Model): 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationTypeInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.version = kwargs.get('version', None) @@ -2627,8 +2465,9 @@ def __init__( self.application_type_definition_kind = kwargs.get('application_type_definition_kind', None) -class ApplicationTypeManifest(msrest.serialization.Model): - """Contains the manifest describing an application type registered in a Service Fabric cluster. +class ApplicationTypeManifest(Model): + """Contains the manifest describing an application type registered in a + Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -2638,10 +2477,7 @@ class ApplicationTypeManifest(msrest.serialization.Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationTypeManifest, self).__init__(**kwargs) self.manifest = kwargs.get('manifest', None) @@ -2651,57 +2487,39 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str :param application_type_version: Required. Application type version. :type application_type_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -2709,78 +2527,96 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeCompletedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationUpgradeCompleted' # type: str - self.application_type_name = kwargs['application_type_name'] - self.application_type_version = kwargs['application_type_version'] - self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ApplicationUpgradeCompleted' -class ApplicationUpgradeDescription(msrest.serialization.Model): - """Describes the parameters for an application upgrade. Note that upgrade description replaces the existing application description. 
This means that if the parameters are not specified, the existing parameters on the applications will be overwritten with the empty parameters list. This would result in the application using the default value of the parameters from the application manifest. If you do not want to change any existing parameter values, please get the application parameters first using the GetApplicationInfo query and then supply those values as Parameters in this ApplicationUpgradeDescription. +class ApplicationUpgradeDescription(Model): + """Describes the parameters for an application upgrade. Note that upgrade + description replaces the existing application description. This means that + if the parameters are not specified, the existing parameters on the + applications will be overwritten with the empty parameters list. This would + result in the application using the default value of the parameters from + the application manifest. If you do not want to change any existing + parameter values, please get the application parameters first using the + GetApplicationInfo query and then supply those values as Parameters in this + ApplicationUpgradeDescription. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the target application, including the 'fabric:' URI scheme. + :param name: Required. The name of the target application, including the + 'fabric:' URI scheme. :type name: str - :param target_application_type_version: Required. The target application type version (found in - the application manifest) for the application upgrade. + :param target_application_type_version: Required. The target application + type version (found in the application manifest) for the application + upgrade. :type target_application_type_version: str - :param parameters: List of application parameters with overridden values from their default - values specified in the application manifest. 
+ :param parameters: List of application parameters with overridden values + from their default values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param upgrade_kind: Required. The kind of upgrade out of the following possible values. - Possible values include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. 
When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible - values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", - "ReverseLexicographical". Default value: "Default". + :param sort_order: Defines the order in which an upgrade proceeds through + the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', + 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default + value: "Default" . :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a - stateless instance is closed, to allow the active requests to drain gracefully. 
This would be - effective when the instance is closing during the application/cluster - upgrade, only for those instances which have a non-zero delay duration configured in the - service description. See InstanceCloseDelayDurationSeconds property in $ref: + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param instance_close_delay_duration_in_seconds: Duration in seconds, to + wait before a stateless instance is closed, to allow the active requests + to drain gracefully. This would be effective when the instance is closing + during the application/cluster + upgrade, only for those instances which have a non-zero delay duration + configured in the service description. See + InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates - that the behavior will entirely depend on the delay configured in the stateless service - description. + Note, the default value of InstanceCloseDelayDurationInSeconds is + 4294967295, which indicates that the behavior will entirely depend on the + delay configured in the stateless service description. :type instance_close_delay_duration_in_seconds: long - :param managed_application_identity: Managed application identity description. + :param managed_application_identity: Managed application identity + description. 
:type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -2806,22 +2642,19 @@ class ApplicationUpgradeDescription(msrest.serialization.Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeDescription, self).__init__(**kwargs) - self.name = kwargs['name'] - self.target_application_type_version = kwargs['target_application_type_version'] + self.name = kwargs.get('name', None) + self.target_application_type_version = kwargs.get('target_application_type_version', None) self.parameters = kwargs.get('parameters', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) - self.force_restart = kwargs.get('force_restart', False) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) self.sort_order = kwargs.get('sort_order', "Default") self.monitoring_policy = kwargs.get('monitoring_policy', None) self.application_health_policy = kwargs.get('application_health_policy', None) - self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', 4294967295) + self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', None) self.managed_application_identity = kwargs.get('managed_application_identity', None) @@ -2830,63 +2663,47 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application type version. + :param current_application_type_version: Required. Current Application + type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type version. + :param application_type_version: Required. Target Application type + version. :type application_type_version: str :param upgrade_state: Required. 
State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain in milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain + in milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -2897,11 +2714,11 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -2911,79 +2728,86 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeDomainCompletedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationUpgradeDomainCompleted' # type: str - self.application_type_name = kwargs['application_type_name'] - self.current_application_type_version = kwargs['current_application_type_version'] - self.application_type_version = kwargs['application_type_version'] - self.upgrade_state = kwargs['upgrade_state'] - self.upgrade_domains = 
kwargs['upgrade_domains'] - self.upgrade_domain_elapsed_time_in_ms = kwargs['upgrade_domain_elapsed_time_in_ms'] + self.application_type_name = kwargs.get('application_type_name', None) + self.current_application_type_version = kwargs.get('current_application_type_version', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.upgrade_state = kwargs.get('upgrade_state', None) + self.upgrade_domains = kwargs.get('upgrade_domains', None) + self.upgrade_domain_elapsed_time_in_ms = kwargs.get('upgrade_domain_elapsed_time_in_ms', None) + self.kind = 'ApplicationUpgradeDomainCompleted' -class ApplicationUpgradeProgressInfo(msrest.serialization.Model): +class ApplicationUpgradeProgressInfo(Model): """Describes the parameters for an application upgrade. - :param name: The name of the target application, including the 'fabric:' URI scheme. + :param name: The name of the target application, including the 'fabric:' + URI scheme. :type name: str - :param type_name: The application type name as defined in the application manifest. + :param type_name: The application type name as defined in the application + manifest. :type type_name: str - :param target_application_type_version: The target application type version (found in the - application manifest) for the application upgrade. + :param target_application_type_version: The target application type + version (found in the application manifest) for the application upgrade. :type target_application_type_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", - "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", - "RollingForwardInProgress", "RollingForwardCompleted", "Failed". + :param upgrade_state: The state of the upgrade domain. 
Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be processed. + :param next_upgrade_domain: The name of the next upgrade domain to be + processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Describes the parameters for an application upgrade. Note that - upgrade description replaces the existing application description. This means that if the - parameters are not specified, the existing parameters on the applications will be overwritten - with the empty parameters list. This would result in the application using the default value of - the parameters from the application manifest. If you do not want to change any existing - parameter values, please get the application parameters first using the GetApplicationInfo - query and then supply those values as Parameters in this ApplicationUpgradeDescription. - :type upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription - :param upgrade_duration_in_milliseconds: The estimated total amount of time spent processing - the overall upgrade. 
+ :param upgrade_description: Describes the parameters for an application + upgrade. Note that upgrade description replaces the existing application + description. This means that if the parameters are not specified, the + existing parameters on the applications will be overwritten with the empty + parameters list. This would result in the application using the default + value of the parameters from the application manifest. If you do not want + to change any existing parameter values, please get the application + parameters first using the GetApplicationInfo query and then supply those + values as Parameters in this ApplicationUpgradeDescription. + :type upgrade_description: + ~azure.servicefabric.models.ApplicationUpgradeDescription + :param upgrade_duration_in_milliseconds: The estimated total amount of + time spent processing the overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated total amount of time spent - processing the current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated total amount + of time spent processing the current upgrade domain. :type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in the current - aggregated health state. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current in-progress upgrade - domain. + :param unhealthy_evaluations: List of health evaluations that resulted in + the current aggregated health state. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. 
:type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and - FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being - executed. Possible values include: "None", "Interrupted", "HealthCheck", - "UpgradeDomainTimeout", "OverallUpgradeTimeout". + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the - time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade + domain progress at the time of upgrade failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param upgrade_status_details: Additional detailed information about the status of the pending - upgrade. + :param upgrade_status_details: Additional detailed information about the + status of the pending upgrade. 
:type upgrade_status_details: str """ @@ -3007,10 +2831,7 @@ class ApplicationUpgradeProgressInfo(msrest.serialization.Model): 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type_name = kwargs.get('type_name', None) @@ -3036,44 +2857,25 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - 
"ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str @@ -3081,14 +2883,15 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): :type application_type_version: str :param failure_reason: Required. Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -3097,11 +2900,11 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, @@ -3109,16 +2912,13 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeRollbackCompletedEvent, 
self).__init__(**kwargs) - self.kind = 'ApplicationUpgradeRollbackCompleted' # type: str - self.application_type_name = kwargs['application_type_name'] - self.application_type_version = kwargs['application_type_version'] - self.failure_reason = kwargs['failure_reason'] - self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ApplicationUpgradeRollbackCompleted' class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): @@ -3126,61 +2926,45 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application type version. + :param current_application_type_version: Required. Current Application + type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type version. + :param application_type_version: Required. Target Application type + version. :type application_type_version: str :param failure_reason: Required. 
Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3190,11 +2974,11 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3203,17 +2987,14 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeRollbackStartedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationUpgradeRollbackStarted' # type: str - self.application_type_name = kwargs['application_type_name'] - self.current_application_type_version = kwargs['current_application_type_version'] - self.application_type_version = kwargs['application_type_version'] - self.failure_reason = kwargs['failure_reason'] - self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] + 
self.application_type_name = kwargs.get('application_type_name', None) + self.current_application_type_version = kwargs.get('current_application_type_version', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ApplicationUpgradeRollbackStarted' class ApplicationUpgradeStartedEvent(ApplicationEvent): @@ -3221,50 +3002,33 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - 
"StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application type version. + :param current_application_type_version: Required. Current Application + type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type version. + :param application_type_version: Required. Target Application type + version. :type application_type_version: str :param upgrade_type: Required. Type of upgrade. :type upgrade_type: str @@ -3275,9 +3039,9 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3288,11 +3052,11 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3302,36 +3066,37 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, 
**kwargs): super(ApplicationUpgradeStartedEvent, self).__init__(**kwargs) - self.kind = 'ApplicationUpgradeStarted' # type: str - self.application_type_name = kwargs['application_type_name'] - self.current_application_type_version = kwargs['current_application_type_version'] - self.application_type_version = kwargs['application_type_version'] - self.upgrade_type = kwargs['upgrade_type'] - self.rolling_upgrade_mode = kwargs['rolling_upgrade_mode'] - self.failure_action = kwargs['failure_action'] + self.application_type_name = kwargs.get('application_type_name', None) + self.current_application_type_version = kwargs.get('current_application_type_version', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.upgrade_type = kwargs.get('upgrade_type', None) + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', None) + self.failure_action = kwargs.get('failure_action', None) + self.kind = 'ApplicationUpgradeStarted' -class ApplicationUpgradeUpdateDescription(msrest.serialization.Model): +class ApplicationUpgradeUpdateDescription(Model): """Describes the parameters for updating an ongoing application upgrade. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the 'fabric:' URI scheme. + :param name: Required. The name of the application, including the + 'fabric:' URI scheme. :type name: str - :param upgrade_kind: Required. The kind of upgrade out of the following possible values. - Possible values include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. 
- :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :param update_description: Describes the parameters for updating a rolling upgrade of - application or cluster. - :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param update_description: Describes the parameters for updating a rolling + upgrade of application or cluster. + :type update_description: + ~azure.servicefabric.models.RollingUpgradeUpdateDescription """ _validation = { @@ -3346,28 +3111,25 @@ class ApplicationUpgradeUpdateDescription(msrest.serialization.Model): 'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ApplicationUpgradeUpdateDescription, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.application_health_policy = kwargs.get('application_health_policy', None) self.update_description = kwargs.get('update_description', None) -class AutoScalingMetric(msrest.serialization.Model): - """Describes the metric that is used for triggering auto scaling operation. Derived classes will describe resources or metrics. +class AutoScalingMetric(Model): + """Describes the metric that is used for triggering auto scaling operation. + Derived classes will describe resources or metrics. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AutoScalingResourceMetric. + sub-classes are: AutoScalingResourceMetric All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The type of auto scaling metric.Constant filled by server. Possible - values include: "Resource". - :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -3382,25 +3144,23 @@ class AutoScalingMetric(msrest.serialization.Model): 'kind': {'Resource': 'AutoScalingResourceMetric'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AutoScalingMetric, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None -class AutoScalingPolicy(msrest.serialization.Model): +class AutoScalingPolicy(Model): """Describes the auto scaling policy. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the auto scaling policy. :type name: str - :param trigger: Required. Determines when auto scaling operation will be invoked. - :type trigger: ~azure.servicefabric.models.AutoScalingTrigger - :param mechanism: Required. The mechanism that is used to scale when auto scaling operation is + :param trigger: Required. Determines when auto scaling operation will be invoked. + :type trigger: ~azure.servicefabric.models.AutoScalingTrigger + :param mechanism: Required. The mechanism that is used to scale when auto + scaling operation is invoked. 
:type mechanism: ~azure.servicefabric.models.AutoScalingMechanism """ @@ -3416,14 +3176,11 @@ class AutoScalingPolicy(msrest.serialization.Model): 'mechanism': {'key': 'mechanism', 'type': 'AutoScalingMechanism'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AutoScalingPolicy, self).__init__(**kwargs) - self.name = kwargs['name'] - self.trigger = kwargs['trigger'] - self.mechanism = kwargs['mechanism'] + self.name = kwargs.get('name', None) + self.trigger = kwargs.get('trigger', None) + self.mechanism = kwargs.get('mechanism', None) class AutoScalingResourceMetric(AutoScalingMetric): @@ -3431,11 +3188,12 @@ class AutoScalingResourceMetric(AutoScalingMetric): All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible - values include: "Resource". - :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind - :param name: Required. Name of the resource. Possible values include: "cpu", "memoryInGB". - :type name: str or ~azure.servicefabric.models.AutoScalingResourceMetricName + :param kind: Required. Constant filled by server. + :type kind: str + :param name: Required. Name of the resource. Possible values include: + 'cpu', 'memoryInGB' + :type name: str or + ~azure.servicefabric.models.AutoScalingResourceMetricName """ _validation = { @@ -3448,26 +3206,22 @@ class AutoScalingResourceMetric(AutoScalingMetric): 'name': {'key': 'name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AutoScalingResourceMetric, self).__init__(**kwargs) - self.kind = 'Resource' # type: str - self.name = kwargs['name'] + self.name = kwargs.get('name', None) + self.kind = 'Resource' -class AutoScalingTrigger(msrest.serialization.Model): +class AutoScalingTrigger(Model): """Describes the trigger for performing auto scaling operation. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AverageLoadScalingTrigger. + sub-classes are: AverageLoadScalingTrigger All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible - values include: "AverageLoad". - :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -3482,12 +3236,9 @@ class AutoScalingTrigger(msrest.serialization.Model): 'kind': {'AverageLoad': 'AverageLoadScalingTrigger'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AutoScalingTrigger, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AverageLoadScalingTrigger(AutoScalingTrigger): @@ -3495,19 +3246,19 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible - values include: "AverageLoad". - :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind - :param metric: Required. Description of the metric that is used for scaling. + :param kind: Required. Constant filled by server. + :type kind: str + :param metric: Required. Description of the metric that is used for + scaling. :type metric: ~azure.servicefabric.models.AutoScalingMetric - :param lower_load_threshold: Required. Lower load threshold (if average load is below this - threshold, service will scale down). + :param lower_load_threshold: Required. Lower load threshold (if average + load is below this threshold, service will scale down). :type lower_load_threshold: float - :param upper_load_threshold: Required. Upper load threshold (if average load is above this - threshold, service will scale up). + :param upper_load_threshold: Required. 
Upper load threshold (if average + load is above this threshold, service will scale up). :type upper_load_threshold: float - :param scale_interval_in_seconds: Required. Scale interval that indicates how often will this - trigger be checked. + :param scale_interval_in_seconds: Required. Scale interval that indicates + how often will this trigger be checked. :type scale_interval_in_seconds: int """ @@ -3527,29 +3278,26 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): 'scale_interval_in_seconds': {'key': 'scaleIntervalInSeconds', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AverageLoadScalingTrigger, self).__init__(**kwargs) - self.kind = 'AverageLoad' # type: str - self.metric = kwargs['metric'] - self.lower_load_threshold = kwargs['lower_load_threshold'] - self.upper_load_threshold = kwargs['upper_load_threshold'] - self.scale_interval_in_seconds = kwargs['scale_interval_in_seconds'] + self.metric = kwargs.get('metric', None) + self.lower_load_threshold = kwargs.get('lower_load_threshold', None) + self.upper_load_threshold = kwargs.get('upper_load_threshold', None) + self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) + self.kind = 'AverageLoad' -class ScalingTriggerDescription(msrest.serialization.Model): +class ScalingTriggerDescription(Model): """Describes the trigger for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AveragePartitionLoadScalingTrigger, AverageServiceLoadScalingTrigger. + sub-classes are: AveragePartitionLoadScalingTrigger, + AverageServiceLoadScalingTrigger All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. - Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". 
- :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -3564,32 +3312,30 @@ class ScalingTriggerDescription(msrest.serialization.Model): 'kind': {'AveragePartitionLoad': 'AveragePartitionLoadScalingTrigger', 'AverageServiceLoad': 'AverageServiceLoadScalingTrigger'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ScalingTriggerDescription, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling trigger related to an average load of a metric/resource of a partition. + """Represents a scaling trigger related to an average load of a + metric/resource of a partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. - Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". - :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind - :param metric_name: Required. The name of the metric for which usage should be tracked. + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. :type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below which a scale in - operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out - operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. 
:type upper_load_threshold: str - :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made - whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. :type scale_interval_in_seconds: long """ @@ -3609,41 +3355,39 @@ class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AveragePartitionLoadScalingTrigger, self).__init__(**kwargs) - self.kind = 'AveragePartitionLoad' # type: str - self.metric_name = kwargs['metric_name'] - self.lower_load_threshold = kwargs['lower_load_threshold'] - self.upper_load_threshold = kwargs['upper_load_threshold'] - self.scale_interval_in_seconds = kwargs['scale_interval_in_seconds'] + self.metric_name = kwargs.get('metric_name', None) + self.lower_load_threshold = kwargs.get('lower_load_threshold', None) + self.upper_load_threshold = kwargs.get('upper_load_threshold', None) + self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) + self.kind = 'AveragePartitionLoad' class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling policy related to an average load of a metric/resource of a service. + """Represents a scaling policy related to an average load of a metric/resource + of a service. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. - Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". - :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind - :param metric_name: Required. The name of the metric for which usage should be tracked. + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. 
The name of the metric for which usage + should be tracked. :type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below which a scale in - operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out - operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made - whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. :type scale_interval_in_seconds: long - :param use_only_primary_load: Required. Flag determines whether only the load of primary - replica should be considered for scaling. - If set to true, then trigger will only consider the load of primary replicas of stateful - service. + :param use_only_primary_load: Required. Flag determines whether only the + load of primary replica should be considered for scaling. + If set to true, then trigger will only consider the load of primary + replicas of stateful service. If set to false, trigger will consider load of all replicas. This parameter cannot be set to true for stateless service. 
:type use_only_primary_load: bool @@ -3667,33 +3411,30 @@ class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): 'use_only_primary_load': {'key': 'UseOnlyPrimaryLoad', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AverageServiceLoadScalingTrigger, self).__init__(**kwargs) - self.kind = 'AverageServiceLoad' # type: str - self.metric_name = kwargs['metric_name'] - self.lower_load_threshold = kwargs['lower_load_threshold'] - self.upper_load_threshold = kwargs['upper_load_threshold'] - self.scale_interval_in_seconds = kwargs['scale_interval_in_seconds'] - self.use_only_primary_load = kwargs['use_only_primary_load'] + self.metric_name = kwargs.get('metric_name', None) + self.lower_load_threshold = kwargs.get('lower_load_threshold', None) + self.upper_load_threshold = kwargs.get('upper_load_threshold', None) + self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) + self.use_only_primary_load = kwargs.get('use_only_primary_load', None) + self.kind = 'AverageServiceLoad' -class BackupStorageDescription(msrest.serialization.Model): +class BackupStorageDescription(Model): """Describes the parameters for the backup storage. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AzureBlobBackupStorageDescription, DsmsAzureBlobBackupStorageDescription, FileShareBackupStorageDescription, ManagedIdentityAzureBlobBackupStorageDescription. + sub-classes are: AzureBlobBackupStorageDescription, + FileShareBackupStorageDescription, DsmsAzureBlobBackupStorageDescription, + ManagedIdentityAzureBlobBackupStorageDescription All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". 
- :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str """ _validation = { @@ -3701,38 +3442,35 @@ class BackupStorageDescription(msrest.serialization.Model): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, } _subtype_map = { - 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'ManagedIdentityAzureBlobStore': 'ManagedIdentityAzureBlobBackupStorageDescription'} + 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription', 'ManagedIdentityAzureBlobStore': 'ManagedIdentityAzureBlobBackupStorageDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupStorageDescription, self).__init__(**kwargs) - self.storage_kind = None # type: Optional[str] self.friendly_name = kwargs.get('friendly_name', None) + self.storage_kind = None class AzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Azure blob store used for storing and enumerating backups. + """Describes the parameters for Azure blob store used for storing and + enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". 
- :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param connection_string: Required. The connection string to connect to the Azure blob store. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param connection_string: Required. The connection string to connect to + the Azure blob store. :type connection_string: str - :param container_name: Required. The name of the container in the blob store to store and - enumerate backups from. + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. :type container_name: str """ @@ -3743,37 +3481,34 @@ class AzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'connection_string': {'key': 'ConnectionString', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AzureBlobBackupStorageDescription, self).__init__(**kwargs) - self.storage_kind = 'AzureBlobStore' # type: str - self.connection_string = kwargs['connection_string'] - self.container_name = kwargs['container_name'] + self.connection_string = kwargs.get('connection_string', None) + self.container_name = kwargs.get('container_name', None) + self.storage_kind = 'AzureBlobStore' -class DiagnosticsSinkProperties(msrest.serialization.Model): +class DiagnosticsSinkProperties(Model): """Properties of a DiagnosticsSink. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AzureInternalMonitoringPipelineSinkDescription. 
+ sub-classes are: AzureInternalMonitoringPipelineSinkDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values - include: "Invalid", "AzureInternalMonitoringPipeline". - :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind - :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. + :param name: Name of the sink. This value is referenced by + DiagnosticsReferenceDescription :type name: str :param description: A description of the sink. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -3781,23 +3516,20 @@ class DiagnosticsSinkProperties(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, } _subtype_map = { 'kind': {'AzureInternalMonitoringPipeline': 'AzureInternalMonitoringPipelineSinkDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DiagnosticsSinkProperties, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) + self.kind = None class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): @@ -3805,23 +3537,24 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values - include: "Invalid", "AzureInternalMonitoringPipeline". - :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind - :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. 
+ :param name: Name of the sink. This value is referenced by + DiagnosticsReferenceDescription :type name: str :param description: A description of the sink. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param account_name: Azure Internal monitoring pipeline account. :type account_name: str :param namespace: Azure Internal monitoring pipeline account namespace. :type namespace: str :param ma_config_url: Azure Internal monitoring agent configuration. :type ma_config_url: str - :param fluentd_config_url: Azure Internal monitoring agent fluentd configuration. + :param fluentd_config_url: Azure Internal monitoring agent fluentd + configuration. :type fluentd_config_url: str - :param auto_key_config_url: Azure Internal monitoring pipeline autokey associated with the - certificate. + :param auto_key_config_url: Azure Internal monitoring pipeline autokey + associated with the certificate. :type auto_key_config_url: str """ @@ -3830,9 +3563,9 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, 'account_name': {'key': 'accountName', 'type': 'str'}, 'namespace': {'key': 'namespace', 'type': 'str'}, 'ma_config_url': {'key': 'maConfigUrl', 'type': 'str'}, @@ -3840,49 +3573,53 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): 'auto_key_config_url': {'key': 'autoKeyConfigUrl', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(AzureInternalMonitoringPipelineSinkDescription, self).__init__(**kwargs) - self.kind = 'AzureInternalMonitoringPipeline' # type: str self.account_name = kwargs.get('account_name', None) self.namespace = kwargs.get('namespace', None) self.ma_config_url = kwargs.get('ma_config_url', None) 
self.fluentd_config_url = kwargs.get('fluentd_config_url', None) self.auto_key_config_url = kwargs.get('auto_key_config_url', None) + self.kind = 'AzureInternalMonitoringPipeline' -class BackupInfo(msrest.serialization.Model): +class BackupInfo(Model): """Represents a backup point which can be used to trigger a restore. :param backup_id: Unique backup ID . :type backup_id: str - :param backup_chain_id: Unique backup chain ID. All backups part of the same chain has the same - backup chain id. A backup chain is comprised of 1 full backup and multiple incremental backups. + :param backup_chain_id: Unique backup chain ID. All backups part of the + same chain has the same backup chain id. A backup chain is comprised of 1 + full backup and multiple incremental backups. :type backup_chain_id: str - :param application_name: Name of the Service Fabric application this partition backup belongs - to. + :param application_name: Name of the Service Fabric application this + partition backup belongs to. :type application_name: str - :param service_name: Name of the Service Fabric service this partition backup belongs to. + :param service_name: Name of the Service Fabric service this partition + backup belongs to. :type service_name: str - :param partition_information: Information about the partition to which this backup belongs to. - :type partition_information: ~azure.servicefabric.models.PartitionInformation - :param backup_location: Location of the backup, relative to the backup store. + :param partition_information: Information about the partition to which + this backup belongs to + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param backup_location: Location of the backup, relative to the backup + store. :type backup_location: str - :param backup_type: Describes the type of backup, whether its full or incremental. Possible - values include: "Invalid", "Full", "Incremental". 
+ :param backup_type: Describes the type of backup, whether its full or + incremental. Possible values include: 'Invalid', 'Full', 'Incremental' :type backup_type: str or ~azure.servicefabric.models.BackupType - :param epoch_of_last_backup_record: Epoch of the last record in this backup. + :param epoch_of_last_backup_record: Epoch of the last record in this + backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch :param lsn_of_last_backup_record: LSN of the last record in this backup. :type lsn_of_last_backup_record: str :param creation_time_utc: The date time when this backup was taken. - :type creation_time_utc: ~datetime.datetime - :param service_manifest_version: Manifest Version of the service this partition backup belongs - to. + :type creation_time_utc: datetime + :param service_manifest_version: Manifest Version of the service this + partition backup belongs to. :type service_manifest_version: str - :param failure_error: Denotes the failure encountered in getting backup point information. + :param failure_error: Denotes the failure encountered in getting backup + point information. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -3901,10 +3638,7 @@ class BackupInfo(msrest.serialization.Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupInfo, self).__init__(**kwargs) self.backup_id = kwargs.get('backup_id', None) self.backup_chain_id = kwargs.get('backup_chain_id', None) @@ -3920,10 +3654,11 @@ def __init__( self.failure_error = kwargs.get('failure_error', None) -class BackupPartitionDescription(msrest.serialization.Model): +class BackupPartitionDescription(Model): """Describes the parameters for triggering partition's backup. - :param backup_storage: Specifies the details of the backup storage where to save the backup. 
+ :param backup_storage: Specifies the details of the backup storage where + to save the backup. :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -3931,40 +3666,39 @@ class BackupPartitionDescription(msrest.serialization.Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupPartitionDescription, self).__init__(**kwargs) self.backup_storage = kwargs.get('backup_storage', None) -class BackupPolicyDescription(msrest.serialization.Model): +class BackupPolicyDescription(Model): """Describes a backup policy for configuring periodic backup. All required parameters must be populated in order to send to Azure. :param name: Required. The unique name identifying this backup policy. :type name: str - :param auto_restore_on_data_loss: Required. Specifies whether to trigger restore automatically - using the latest available backup in case the partition experiences a data loss event. + :param auto_restore_on_data_loss: Required. Specifies whether to trigger + restore automatically using the latest available backup in case the + partition experiences a data loss event. :type auto_restore_on_data_loss: bool - :param max_incremental_backups: Required. Defines the maximum number of incremental backups to - be taken between two full backups. This is just the upper limit. A full backup may be taken - before specified number of incremental backups are completed in one of the following conditions - - - * The replica has never taken a full backup since it has become primary, - * Some of the log records since the last backup has been truncated, or - * Replica passed the MaxAccumulatedBackupLogSizeInMB limit. + :param max_incremental_backups: Required. Defines the maximum number of + incremental backups to be taken between two full backups. This is just the + upper limit. 
A full backup may be taken before specified number of + incremental backups are completed in one of the following conditions + - The replica has never taken a full backup since it has become primary, + - Some of the log records since the last backup has been truncated, or + - Replica passed the MaxAccumulatedBackupLogSizeInMB limit. :type max_incremental_backups: int :param schedule: Required. Describes the backup schedule parameters. :type schedule: ~azure.servicefabric.models.BackupScheduleDescription - :param storage: Required. Describes the details of backup storage where to store the periodic - backups. + :param storage: Required. Describes the details of backup storage where to + store the periodic backups. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param retention_policy: Describes the policy to retain backups in storage. - :type retention_policy: ~azure.servicefabric.models.RetentionPolicyDescription + :param retention_policy: Describes the policy to retain backups in + storage. 
+ :type retention_policy: + ~azure.servicefabric.models.RetentionPolicyDescription """ _validation = { @@ -3984,36 +3718,39 @@ class BackupPolicyDescription(msrest.serialization.Model): 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicyDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupPolicyDescription, self).__init__(**kwargs) - self.name = kwargs['name'] - self.auto_restore_on_data_loss = kwargs['auto_restore_on_data_loss'] - self.max_incremental_backups = kwargs['max_incremental_backups'] - self.schedule = kwargs['schedule'] - self.storage = kwargs['storage'] + self.name = kwargs.get('name', None) + self.auto_restore_on_data_loss = kwargs.get('auto_restore_on_data_loss', None) + self.max_incremental_backups = kwargs.get('max_incremental_backups', None) + self.schedule = kwargs.get('schedule', None) + self.storage = kwargs.get('storage', None) self.retention_policy = kwargs.get('retention_policy', None) -class BackupProgressInfo(msrest.serialization.Model): +class BackupProgressInfo(Model): """Describes the progress of a partition's backup. - :param backup_state: Represents the current state of the partition backup operation. Possible - values include: "Invalid", "Accepted", "BackupInProgress", "Success", "Failure", "Timeout". + :param backup_state: Represents the current state of the partition backup + operation. Possible values include: 'Invalid', 'Accepted', + 'BackupInProgress', 'Success', 'Failure', 'Timeout' :type backup_state: str or ~azure.servicefabric.models.BackupState - :param time_stamp_utc: TimeStamp in UTC when operation succeeded or failed. - :type time_stamp_utc: ~datetime.datetime + :param time_stamp_utc: TimeStamp in UTC when operation succeeded or + failed. + :type time_stamp_utc: datetime :param backup_id: Unique ID of the newly created backup. :type backup_id: str - :param backup_location: Location, relative to the backup store, of the newly created backup. 
+ :param backup_location: Location, relative to the backup store, of the + newly created backup. :type backup_location: str - :param epoch_of_last_backup_record: Specifies the epoch of the last record included in backup. + :param epoch_of_last_backup_record: Specifies the epoch of the last record + included in backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch - :param lsn_of_last_backup_record: The LSN of last record included in backup. + :param lsn_of_last_backup_record: The LSN of last record included in + backup. :type lsn_of_last_backup_record: str - :param failure_error: Denotes the failure encountered in performing backup operation. + :param failure_error: Denotes the failure encountered in performing backup + operation. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -4027,10 +3764,7 @@ class BackupProgressInfo(msrest.serialization.Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupProgressInfo, self).__init__(**kwargs) self.backup_state = kwargs.get('backup_state', None) self.time_stamp_utc = kwargs.get('time_stamp_utc', None) @@ -4041,18 +3775,17 @@ def __init__( self.failure_error = kwargs.get('failure_error', None) -class BackupScheduleDescription(msrest.serialization.Model): +class BackupScheduleDescription(Model): """Describes the backup schedule parameters. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FrequencyBasedBackupScheduleDescription, TimeBasedBackupScheduleDescription. + sub-classes are: FrequencyBasedBackupScheduleDescription, + TimeBasedBackupScheduleDescription All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. The kind of backup schedule, time based or frequency - based.Constant filled by server. Possible values include: "Invalid", "TimeBased", - "FrequencyBased". 
- :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str """ _validation = { @@ -4067,22 +3800,22 @@ class BackupScheduleDescription(msrest.serialization.Model): 'schedule_kind': {'FrequencyBased': 'FrequencyBasedBackupScheduleDescription', 'TimeBased': 'TimeBasedBackupScheduleDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = None # type: Optional[str] + self.schedule_kind = None -class BackupSuspensionInfo(msrest.serialization.Model): +class BackupSuspensionInfo(Model): """Describes the backup suspension details. - :param is_suspended: Indicates whether periodic backup is suspended at this level or not. + :param is_suspended: Indicates whether periodic backup is suspended at + this level or not. :type is_suspended: bool - :param suspension_inherited_from: Specifies the scope at which the backup suspension was - applied. Possible values include: "Invalid", "Partition", "Service", "Application". - :type suspension_inherited_from: str or ~azure.servicefabric.models.BackupSuspensionScope + :param suspension_inherited_from: Specifies the scope at which the backup + suspension was applied. 
Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type suspension_inherited_from: str or + ~azure.servicefabric.models.BackupSuspensionScope """ _attribute_map = { @@ -4090,27 +3823,22 @@ class BackupSuspensionInfo(msrest.serialization.Model): 'suspension_inherited_from': {'key': 'SuspensionInheritedFrom', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BackupSuspensionInfo, self).__init__(**kwargs) self.is_suspended = kwargs.get('is_suspended', None) self.suspension_inherited_from = kwargs.get('suspension_inherited_from', None) -class RetentionPolicyDescription(msrest.serialization.Model): +class RetentionPolicyDescription(Model): """Describes the retention policy configured. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BasicRetentionPolicyDescription. + sub-classes are: BasicRetentionPolicyDescription All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" - retention policy is supported.Constant filled by server. Possible values include: "Basic", - "Invalid". - :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType + :param retention_policy_type: Required. Constant filled by server. 
+ :type retention_policy_type: str """ _validation = { @@ -4125,12 +3853,9 @@ class RetentionPolicyDescription(msrest.serialization.Model): 'retention_policy_type': {'Basic': 'BasicRetentionPolicyDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RetentionPolicyDescription, self).__init__(**kwargs) - self.retention_policy_type = None # type: Optional[str] + self.retention_policy_type = None class BasicRetentionPolicyDescription(RetentionPolicyDescription): @@ -4138,17 +3863,16 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" - retention policy is supported.Constant filled by server. Possible values include: "Basic", - "Invalid". - :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType - :param retention_duration: Required. It is the minimum duration for which a backup created, - will remain stored in the storage and might get deleted after that span of time. It should be - specified in ISO8601 format. - :type retention_duration: ~datetime.timedelta - :param minimum_number_of_backups: It is the minimum number of backups to be retained at any - point of time. If specified with a non zero value, backups will not be deleted even if the - backups have gone past retention duration and have number of backups less than or equal to it. + :param retention_policy_type: Required. Constant filled by server. + :type retention_policy_type: str + :param retention_duration: Required. It is the minimum duration for which + a backup created, will remain stored in the storage and might get deleted + after that span of time. It should be specified in ISO8601 format. + :type retention_duration: timedelta + :param minimum_number_of_backups: It is the minimum number of backups to + be retained at any point of time. 
If specified with a non zero value, + backups will not be deleted even if the backups have gone past retention + duration and have number of backups less than or equal to it. :type minimum_number_of_backups: int """ @@ -4164,28 +3888,24 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): 'minimum_number_of_backups': {'key': 'MinimumNumberOfBackups', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BasicRetentionPolicyDescription, self).__init__(**kwargs) - self.retention_policy_type = 'Basic' # type: str - self.retention_duration = kwargs['retention_duration'] + self.retention_duration = kwargs.get('retention_duration', None) self.minimum_number_of_backups = kwargs.get('minimum_number_of_backups', None) + self.retention_policy_type = 'Basic' -class PropertyValue(msrest.serialization.Model): +class PropertyValue(Model): """Describes a Service Fabric property value. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BinaryPropertyValue, DoublePropertyValue, GuidPropertyValue, Int64PropertyValue, StringPropertyValue. + sub-classes are: BinaryPropertyValue, Int64PropertyValue, + DoublePropertyValue, StringPropertyValue, GuidPropertyValue All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -4197,15 +3917,12 @@ class PropertyValue(msrest.serialization.Model): } _subtype_map = { - 'kind': {'Binary': 'BinaryPropertyValue', 'Double': 'DoublePropertyValue', 'Guid': 'GuidPropertyValue', 'Int64': 'Int64PropertyValue', 'String': 'StringPropertyValue'} + 'kind': {'Binary': 'BinaryPropertyValue', 'Int64': 'Int64PropertyValue', 'Double': 'DoublePropertyValue', 'String': 'StringPropertyValue', 'Guid': 'GuidPropertyValue'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyValue, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class BinaryPropertyValue(PropertyValue): @@ -4213,12 +3930,10 @@ class BinaryPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind - :param data: Required. Array of bytes to be sent as an integer array. Each element of array is - a number between 0 and 255. + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. Array of bytes to be sent as an integer array. Each + element of array is a number between 0 and 255. :type data: list[int] """ @@ -4232,26 +3947,25 @@ class BinaryPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': '[int]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(BinaryPropertyValue, self).__init__(**kwargs) - self.kind = 'Binary' # type: str - self.data = kwargs['data'] + self.data = kwargs.get('data', None) + self.kind = 'Binary' -class Chaos(msrest.serialization.Model): +class Chaos(Model): """Contains a description of Chaos. 
- :param chaos_parameters: If Chaos is running, these are the parameters Chaos is running with. + :param chaos_parameters: If Chaos is running, these are the parameters + Chaos is running with. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param status: Current status of the Chaos run. Possible values include: "Invalid", "Running", - "Stopped". + :param status: Current status of the Chaos run. Possible values include: + 'Invalid', 'Running', 'Stopped' :type status: str or ~azure.servicefabric.models.ChaosStatus - :param schedule_status: Current status of the schedule. Possible values include: "Invalid", - "Stopped", "Active", "Expired", "Pending". - :type schedule_status: str or ~azure.servicefabric.models.ChaosScheduleStatus + :param schedule_status: Current status of the schedule. Possible values + include: 'Invalid', 'Stopped', 'Active', 'Expired', 'Pending' + :type schedule_status: str or + ~azure.servicefabric.models.ChaosScheduleStatus """ _attribute_map = { @@ -4260,10 +3974,7 @@ class Chaos(msrest.serialization.Model): 'schedule_status': {'key': 'ScheduleStatus', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(Chaos, self).__init__(**kwargs) self.chaos_parameters = kwargs.get('chaos_parameters', None) self.status = kwargs.get('status', None) @@ -4275,44 +3986,25 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -4324,14 +4016,15 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): :type service_manifest_name: str :param code_package_name: Required. Code package name. :type code_package_name: str - :param service_package_activation_id: Required. Id of Service package activation. + :param service_package_activation_id: Required. Id of Service package + activation. 
:type service_package_activation_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4342,11 +4035,11 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4356,26 +4049,27 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosCodePackageRestartScheduledEvent, self).__init__(**kwargs) - self.kind = 'ChaosCodePackageRestartScheduled' # type: str - self.fault_group_id = kwargs['fault_group_id'] - self.fault_id = kwargs['fault_id'] - self.node_name = kwargs['node_name'] - self.service_manifest_name = kwargs['service_manifest_name'] - self.code_package_name = kwargs['code_package_name'] - self.service_package_activation_id = kwargs['service_package_activation_id'] + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.node_name = kwargs.get('node_name', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.kind = 
'ChaosCodePackageRestartScheduled' -class ChaosContext(msrest.serialization.Model): - """Describes a map, which is a collection of (string, string) type key-value pairs. The map can be used to record information about -the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be at most 4095 characters long. -This map is set by the starter of the Chaos run to optionally store the context about the specific run. +class ChaosContext(Model): + """Describes a map, which is a collection of (string, string) type key-value + pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string + (key or value) can be at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the + context about the specific run. - :param map: Describes a map that contains a collection of ChaosContextMapItem's. + :param map: Describes a map that contains a collection of + ChaosContextMapItem's. :type map: dict[str, str] """ @@ -4383,61 +4077,58 @@ class ChaosContext(msrest.serialization.Model): 'map': {'key': 'Map', 'type': '{str}'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosContext, self).__init__(**kwargs) self.map = kwargs.get('map', None) -class ChaosEvent(msrest.serialization.Model): +class ChaosEvent(Model): """Represents an event generated during a Chaos run. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, WaitingChaosEvent. + sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, + StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, + WaitingChaosEvent All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. 
Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ExecutingFaults': 'ExecutingFaultsChaosEvent', 'Started': 'StartedChaosEvent', 'Stopped': 'StoppedChaosEvent', 'TestError': 'TestErrorChaosEvent', 'ValidationFailed': 'ValidationFailedChaosEvent', 'Waiting': 'WaitingChaosEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosEvent, self).__init__(**kwargs) - self.kind = None # type: Optional[str] - self.time_stamp_utc = kwargs['time_stamp_utc'] + self.time_stamp_utc = kwargs.get('time_stamp_utc', None) + self.kind = None -class ChaosEventsSegment(msrest.serialization.Model): - """Contains the list of Chaos events and the continuation token to get the next segment. +class ChaosEventsSegment(Model): + """Contains the list of Chaos events and the continuation token to get the + next segment. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. 
If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param history: List of Chaos events that meet the user-supplied criteria. :type history: list[~azure.servicefabric.models.ChaosEventWrapper] @@ -4448,16 +4139,13 @@ class ChaosEventsSegment(msrest.serialization.Model): 'history': {'key': 'History', 'type': '[ChaosEventWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosEventsSegment, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.history = kwargs.get('history', None) -class ChaosEventWrapper(msrest.serialization.Model): +class ChaosEventWrapper(Model): """Wrapper object for Chaos event. :param chaos_event: Represents an event generated during a Chaos run. @@ -4468,10 +4156,7 @@ class ChaosEventWrapper(msrest.serialization.Model): 'chaos_event': {'key': 'ChaosEvent', 'type': 'ChaosEvent'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosEventWrapper, self).__init__(**kwargs) self.chaos_event = kwargs.get('chaos_event', None) @@ -4480,73 +4165,54 @@ class NodeEvent(FabricEvent): """Represents the base for all Node Events. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ChaosNodeRestartScheduledEvent, NodeAbortedEvent, NodeAddedToClusterEvent, NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, NodeDownEvent, NodeHealthReportExpiredEvent, NodeNewHealthReportEvent, NodeOpenFailedEvent, NodeOpenSucceededEvent, NodeRemovedFromClusterEvent, NodeUpEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", 
"ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: NodeAbortedEvent, NodeAddedToClusterEvent, + NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, + NodeDownEvent, NodeNewHealthReportEvent, NodeHealthReportExpiredEvent, + NodeOpenSucceededEvent, NodeOpenFailedEvent, NodeRemovedFromClusterEvent, + NodeUpEvent, ChaosNodeRestartScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, } _subtype_map = { - 'kind': {'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent', 'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent'} + 'kind': {'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent', 'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeEvent, self).__init__(**kwargs) - self.kind = 'NodeEvent' # 
type: str - self.node_name = kwargs['node_name'] + self.node_name = kwargs.get('node_name', None) + self.kind = 'NodeEvent' class ChaosNodeRestartScheduledEvent(NodeEvent): @@ -4554,38 +4220,18 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", 
"ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. 
@@ -4597,9 +4243,9 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -4607,79 +4253,85 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosNodeRestartScheduledEvent, self).__init__(**kwargs) - self.kind = 'ChaosNodeRestartScheduled' # type: str - self.node_instance_id = kwargs['node_instance_id'] - self.fault_group_id = kwargs['fault_group_id'] - self.fault_id = kwargs['fault_id'] + self.node_instance_id = kwargs.get('node_instance_id', None) + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.kind = 'ChaosNodeRestartScheduled' -class ChaosParameters(msrest.serialization.Model): +class ChaosParameters(Model): """Defines all the parameters to configure a Chaos run. - :param time_to_run_in_seconds: Total time (in seconds) for which Chaos will run before - automatically stopping. The maximum allowed value is 4,294,967,295 (System.UInt32.MaxValue). + :param time_to_run_in_seconds: Total time (in seconds) for which Chaos + will run before automatically stopping. 
The maximum allowed value is + 4,294,967,295 (System.UInt32.MaxValue). Default value: "4294967295" . :type time_to_run_in_seconds: str - :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of time to wait for all - cluster entities to become stable and healthy. Chaos executes in iterations and at the start of - each iteration it validates the health of cluster entities. + :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of + time to wait for all cluster entities to become stable and healthy. Chaos + executes in iterations and at the start of each iteration it validates the + health of cluster entities. During validation if a cluster entity is not stable and healthy within - MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation failed event. + MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation + failed event. Default value: 60 . :type max_cluster_stabilization_timeout_in_seconds: long - :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of concurrent faults - induced per iteration. - Chaos executes in iterations and two consecutive iterations are separated by a validation - phase. - The higher the concurrency, the more aggressive the injection of faults, leading to inducing - more complex series of states to uncover bugs. - The recommendation is to start with a value of 2 or 3 and to exercise caution while moving up. + :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of + concurrent faults induced per iteration. + Chaos executes in iterations and two consecutive iterations are separated + by a validation phase. + The higher the concurrency, the more aggressive the injection of faults, + leading to inducing more complex series of states to uncover bugs. + The recommendation is to start with a value of 2 or 3 and to exercise + caution while moving up. Default value: 1 . 
:type max_concurrent_faults: long - :param enable_move_replica_faults: Enables or disables the move primary and move secondary - faults. + :param enable_move_replica_faults: Enables or disables the move primary + and move secondary faults. Default value: True . :type enable_move_replica_faults: bool - :param wait_time_between_faults_in_seconds: Wait time (in seconds) between consecutive faults - within a single iteration. - The larger the value, the lower the overlapping between faults and the simpler the sequence of - state transitions that the cluster goes through. - The recommendation is to start with a value between 1 and 5 and exercise caution while moving - up. + :param wait_time_between_faults_in_seconds: Wait time (in seconds) between + consecutive faults within a single iteration. + The larger the value, the lower the overlapping between faults and the + simpler the sequence of state transitions that the cluster goes through. + The recommendation is to start with a value between 1 and 5 and exercise + caution while moving up. Default value: 20 . :type wait_time_between_faults_in_seconds: long - :param wait_time_between_iterations_in_seconds: Time-separation (in seconds) between two - consecutive iterations of Chaos. - The larger the value, the lower the fault injection rate. + :param wait_time_between_iterations_in_seconds: Time-separation (in + seconds) between two consecutive iterations of Chaos. + The larger the value, the lower the fault injection rate. Default value: + 30 . :type wait_time_between_iterations_in_seconds: long - :param cluster_health_policy: Passed-in cluster health policy is used to validate health of the - cluster in between Chaos iterations. If the cluster health is in error or if an unexpected - exception happens during fault execution--to provide the cluster with some time to - recuperate--Chaos will wait for 30 minutes before the next health-check. 
- :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param context: Describes a map, which is a collection of (string, string) type key-value - pairs. The map can be used to record information about - the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be - at most 4095 characters long. - This map is set by the starter of the Chaos run to optionally store the context about the - specific run. + :param cluster_health_policy: Passed-in cluster health policy is used to + validate health of the cluster in between Chaos iterations. If the cluster + health is in error or if an unexpected exception happens during fault + execution--to provide the cluster with some time to recuperate--Chaos will + wait for 30 minutes before the next health-check. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param context: Describes a map, which is a collection of (string, string) + type key-value pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string + (key or value) can be at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the + context about the specific run. :type context: ~azure.servicefabric.models.ChaosContext - :param chaos_target_filter: List of cluster entities to target for Chaos faults. - This filter can be used to target Chaos faults only to certain node types or only to certain - application instances. If ChaosTargetFilter is not used, Chaos faults all cluster entities. - If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter - specification. + :param chaos_target_filter: List of cluster entities to target for Chaos + faults. + This filter can be used to target Chaos faults only to certain node types + or only to certain application instances. If ChaosTargetFilter is not + used, Chaos faults all cluster entities. 
+ If ChaosTargetFilter is used, Chaos faults only the entities that meet the + ChaosTargetFilter specification. :type chaos_target_filter: ~azure.servicefabric.models.ChaosTargetFilter """ @@ -4702,10 +4354,7 @@ class ChaosParameters(msrest.serialization.Model): 'chaos_target_filter': {'key': 'ChaosTargetFilter', 'type': 'ChaosTargetFilter'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosParameters, self).__init__(**kwargs) self.time_to_run_in_seconds = kwargs.get('time_to_run_in_seconds', "4294967295") self.max_cluster_stabilization_timeout_in_seconds = kwargs.get('max_cluster_stabilization_timeout_in_seconds', 60) @@ -4718,15 +4367,16 @@ def __init__( self.chaos_target_filter = kwargs.get('chaos_target_filter', None) -class ChaosParametersDictionaryItem(msrest.serialization.Model): +class ChaosParametersDictionaryItem(Model): """Defines an item in ChaosParametersDictionary of the Chaos Schedule. All required parameters must be populated in order to send to Azure. - :param key: Required. The key identifying the Chaos Parameter in the dictionary. This key is - referenced by Chaos Schedule Jobs. + :param key: Required. The key identifying the Chaos Parameter in the + dictionary. This key is referenced by Chaos Schedule Jobs. :type key: str - :param value: Required. Defines all the parameters to configure a Chaos run. + :param value: Required. Defines all the parameters to configure a Chaos + run. 
:type value: ~azure.servicefabric.models.ChaosParameters """ @@ -4740,89 +4390,67 @@ class ChaosParametersDictionaryItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'ChaosParameters'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosParametersDictionaryItem, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) class PartitionEvent(FabricEvent): """Represents the base for all Partition Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosPartitionPrimaryMoveScheduledEvent, ChaosPartitionSecondaryMoveScheduledEvent, PartitionAnalysisEvent, PartitionHealthReportExpiredEvent, PartitionNewHealthReportEvent, PartitionReconfiguredEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: PartitionAnalysisEvent, PartitionNewHealthReportEvent, + PartitionHealthReportExpiredEvent, PartitionReconfiguredEvent, + ChaosPartitionSecondaryMoveScheduledEvent, + ChaosPartitionPrimaryMoveScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
:type partition_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent'} + 'kind': {'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionEvent, self).__init__(**kwargs) - self.kind = 'PartitionEvent' # type: str - self.partition_id = kwargs['partition_id'] + self.partition_id = kwargs.get('partition_id', None) + self.kind = 'PartitionEvent' class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): @@ -4830,42 +4458,23 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. 
- :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -4880,9 +4489,9 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4892,11 +4501,11 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4905,17 +4514,14 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosPartitionPrimaryMoveScheduledEvent, self).__init__(**kwargs) - self.kind = 'ChaosPartitionPrimaryMoveScheduled' # type: str - self.fault_group_id = kwargs['fault_group_id'] - self.fault_id = kwargs['fault_id'] - self.service_name = kwargs['service_name'] - self.node_to = kwargs['node_to'] - self.forced_move = kwargs['forced_move'] + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_name = kwargs.get('service_name', None) + self.node_to = kwargs.get('node_to', None) + self.forced_move = kwargs.get('forced_move', None) + self.kind = 'ChaosPartitionPrimaryMoveScheduled' class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): @@ -4923,42 +4529,23 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): All 
required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -4975,9 +4562,9 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4988,11 +4575,11 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -5002,103 +4589,84 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosPartitionSecondaryMoveScheduledEvent, self).__init__(**kwargs) - self.kind = 'ChaosPartitionSecondaryMoveScheduled' # type: str - self.fault_group_id = kwargs['fault_group_id'] - self.fault_id = kwargs['fault_id'] - self.service_name = kwargs['service_name'] - self.source_node = kwargs['source_node'] - self.destination_node = kwargs['destination_node'] - self.forced_move = kwargs['forced_move'] + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_name = kwargs.get('service_name', None) + self.source_node = kwargs.get('source_node', None) + self.destination_node = kwargs.get('destination_node', None) + self.forced_move = kwargs.get('forced_move', None) + self.kind = 'ChaosPartitionSecondaryMoveScheduled' class 
ReplicaEvent(FabricEvent): """Represents the base for all Replica Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent, StatefulReplicaHealthReportExpiredEvent, StatefulReplicaNewHealthReportEvent, StatelessReplicaHealthReportExpiredEvent, StatelessReplicaNewHealthReportEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: StatefulReplicaNewHealthReportEvent, + StatefulReplicaHealthReportExpiredEvent, + StatelessReplicaNewHealthReportEvent, + StatelessReplicaHealthReportExpiredEvent, + ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. 
If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. 
:type replica_id: long """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, } _subtype_map = { - 'kind': {'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent'} + 'kind': {'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaEvent, self).__init__(**kwargs) - self.kind = 'ReplicaEvent' # type: str - self.partition_id = kwargs['partition_id'] - self.replica_id = kwargs['replica_id'] + self.partition_id = kwargs.get('partition_id', None) + self.replica_id = kwargs.get('replica_id', None) + 
self.kind = 'ReplicaEvent' class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): @@ -5106,48 +4674,31 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", 
"ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. 
- Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -5158,9 +4709,9 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -5169,11 +4720,11 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -5181,15 +4732,12 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosReplicaRemovalScheduledEvent, self).__init__(**kwargs) - self.kind = 'ChaosReplicaRemovalScheduled' # type: str - self.fault_group_id = kwargs['fault_group_id'] - self.fault_id = 
kwargs['fault_id'] - self.service_uri = kwargs['service_uri'] + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_uri = kwargs.get('service_uri', None) + self.kind = 'ChaosReplicaRemovalScheduled' class ChaosReplicaRestartScheduledEvent(ReplicaEvent): @@ -5197,48 +4745,31 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. 
It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -5249,9 +4780,9 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -5260,11 +4791,11 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -5272,29 +4803,29 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def 
__init__(self, **kwargs): super(ChaosReplicaRestartScheduledEvent, self).__init__(**kwargs) - self.kind = 'ChaosReplicaRestartScheduled' # type: str - self.fault_group_id = kwargs['fault_group_id'] - self.fault_id = kwargs['fault_id'] - self.service_uri = kwargs['service_uri'] + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_uri = kwargs.get('service_uri', None) + self.kind = 'ChaosReplicaRestartScheduled' -class ChaosSchedule(msrest.serialization.Model): +class ChaosSchedule(Model): """Defines the schedule used by Chaos. :param start_date: The date and time Chaos will start using this schedule. - :type start_date: ~datetime.datetime - :param expiry_date: The date and time Chaos will continue to use this schedule until. - :type expiry_date: ~datetime.datetime - :param chaos_parameters_dictionary: A mapping of string names to Chaos Parameters to be - referenced by Chaos Schedule Jobs. + Default value: "1601-01-01T00:00:00Z" . + :type start_date: datetime + :param expiry_date: The date and time Chaos will continue to use this + schedule until. Default value: "9999-12-31T23:59:59.999Z" . + :type expiry_date: datetime + :param chaos_parameters_dictionary: A mapping of string names to Chaos + Parameters to be referenced by Chaos Schedule Jobs. :type chaos_parameters_dictionary: list[~azure.servicefabric.models.ChaosParametersDictionaryItem] - :param jobs: A list of all Chaos Schedule Jobs that will be automated by the schedule. + :param jobs: A list of all Chaos Schedule Jobs that will be automated by + the schedule. 
:type jobs: list[~azure.servicefabric.models.ChaosScheduleJob] """ @@ -5305,10 +4836,7 @@ class ChaosSchedule(msrest.serialization.Model): 'jobs': {'key': 'Jobs', 'type': '[ChaosScheduleJob]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosSchedule, self).__init__(**kwargs) self.start_date = kwargs.get('start_date', "1601-01-01T00:00:00Z") self.expiry_date = kwargs.get('expiry_date', "9999-12-31T23:59:59.999Z") @@ -5316,8 +4844,9 @@ def __init__( self.jobs = kwargs.get('jobs', None) -class ChaosScheduleDescription(msrest.serialization.Model): - """Defines the Chaos Schedule used by Chaos and the version of the Chaos Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. +class ChaosScheduleDescription(Model): + """Defines the Chaos Schedule used by Chaos and the version of the Chaos + Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. :param version: The version number of the Schedule. :type version: int @@ -5334,24 +4863,24 @@ class ChaosScheduleDescription(msrest.serialization.Model): 'schedule': {'key': 'Schedule', 'type': 'ChaosSchedule'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosScheduleDescription, self).__init__(**kwargs) self.version = kwargs.get('version', None) self.schedule = kwargs.get('schedule', None) -class ChaosScheduleJob(msrest.serialization.Model): - """Defines a repetition rule and parameters of Chaos to be used with the Chaos Schedule. +class ChaosScheduleJob(Model): + """Defines a repetition rule and parameters of Chaos to be used with the Chaos + Schedule. - :param chaos_parameters: A reference to which Chaos Parameters of the Chaos Schedule to use. + :param chaos_parameters: A reference to which Chaos Parameters of the + Chaos Schedule to use. :type chaos_parameters: str - :param days: Defines the days of the week that a Chaos Schedule Job will run for. 
+ :param days: Defines the days of the week that a Chaos Schedule Job will + run for. :type days: ~azure.servicefabric.models.ChaosScheduleJobActiveDaysOfWeek - :param times: A list of Time Ranges that specify when during active days that this job will - run. The times are interpreted as UTC. + :param times: A list of Time Ranges that specify when during active days + that this job will run. The times are interpreted as UTC. :type times: list[~azure.servicefabric.models.TimeRange] """ @@ -5361,32 +4890,36 @@ class ChaosScheduleJob(msrest.serialization.Model): 'times': {'key': 'Times', 'type': '[TimeRange]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosScheduleJob, self).__init__(**kwargs) self.chaos_parameters = kwargs.get('chaos_parameters', None) self.days = kwargs.get('days', None) self.times = kwargs.get('times', None) -class ChaosScheduleJobActiveDaysOfWeek(msrest.serialization.Model): +class ChaosScheduleJobActiveDaysOfWeek(Model): """Defines the days of the week that a Chaos Schedule Job will run for. :param sunday: Indicates if the Chaos Schedule Job will run on Sunday. + Default value: False . :type sunday: bool :param monday: Indicates if the Chaos Schedule Job will run on Monday. + Default value: False . :type monday: bool :param tuesday: Indicates if the Chaos Schedule Job will run on Tuesday. + Default value: False . :type tuesday: bool - :param wednesday: Indicates if the Chaos Schedule Job will run on Wednesday. + :param wednesday: Indicates if the Chaos Schedule Job will run on + Wednesday. Default value: False . :type wednesday: bool :param thursday: Indicates if the Chaos Schedule Job will run on Thursday. + Default value: False . :type thursday: bool :param friday: Indicates if the Chaos Schedule Job will run on Friday. + Default value: False . :type friday: bool :param saturday: Indicates if the Chaos Schedule Job will run on Saturday. + Default value: False . 
:type saturday: bool """ @@ -5400,10 +4933,7 @@ class ChaosScheduleJobActiveDaysOfWeek(msrest.serialization.Model): 'saturday': {'key': 'Saturday', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosScheduleJobActiveDaysOfWeek, self).__init__(**kwargs) self.sunday = kwargs.get('sunday', False) self.monday = kwargs.get('monday', False) @@ -5418,68 +4948,49 @@ class ClusterEvent(FabricEvent): """Represents the base for all Cluster Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosStartedEvent, ChaosStoppedEvent, ClusterHealthReportExpiredEvent, ClusterNewHealthReportEvent, ClusterUpgradeCompletedEvent, ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: ClusterNewHealthReportEvent, + ClusterHealthReportExpiredEvent, ClusterUpgradeCompletedEvent, + ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, + ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent, + ChaosStoppedEvent, ChaosStartedEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'ChaosStarted': 'ChaosStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent'} + 
'kind': {'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ChaosStarted': 'ChaosStartedEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterEvent, self).__init__(**kwargs) - self.kind = 'ClusterEvent' # type: str + self.kind = 'ClusterEvent' class ChaosStartedEvent(ClusterEvent): @@ -5487,51 +4998,34 @@ class ChaosStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", 
"ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param max_concurrent_faults: Required. Maximum number of concurrent faults. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_concurrent_faults: Required. Maximum number of concurrent + faults. :type max_concurrent_faults: long :param time_to_run_in_seconds: Required. Time to run in seconds. :type time_to_run_in_seconds: float - :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum timeout for cluster - stabilization in seconds. + :param max_cluster_stabilization_timeout_in_seconds: Required. 
Maximum + timeout for cluster stabilization in seconds. :type max_cluster_stabilization_timeout_in_seconds: float - :param wait_time_between_iterations_in_seconds: Required. Wait time between iterations in - seconds. + :param wait_time_between_iterations_in_seconds: Required. Wait time + between iterations in seconds. :type wait_time_between_iterations_in_seconds: float - :param wait_time_between_faults_in_seconds: Required. Wait time between faults in seconds. + :param wait_time_between_faults_in_seconds: Required. Wait time between + faults in seconds. :type wait_time_between_faults_in_seconds: float - :param move_replica_fault_enabled: Required. Indicates MoveReplica fault is enabled. + :param move_replica_fault_enabled: Required. Indicates MoveReplica fault + is enabled. :type move_replica_fault_enabled: bool :param included_node_type_list: Required. List of included Node types. :type included_node_type_list: str @@ -5544,9 +5038,9 @@ class ChaosStartedEvent(ClusterEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'max_concurrent_faults': {'required': True}, 'time_to_run_in_seconds': {'required': True}, 'max_cluster_stabilization_timeout_in_seconds': {'required': True}, @@ -5560,11 +5054,11 @@ class ChaosStartedEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'float'}, 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'float'}, @@ -5577,22 +5071,19 
@@ class ChaosStartedEvent(ClusterEvent): 'chaos_context': {'key': 'ChaosContext', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosStartedEvent, self).__init__(**kwargs) - self.kind = 'ChaosStarted' # type: str - self.max_concurrent_faults = kwargs['max_concurrent_faults'] - self.time_to_run_in_seconds = kwargs['time_to_run_in_seconds'] - self.max_cluster_stabilization_timeout_in_seconds = kwargs['max_cluster_stabilization_timeout_in_seconds'] - self.wait_time_between_iterations_in_seconds = kwargs['wait_time_between_iterations_in_seconds'] - self.wait_time_between_faults_in_seconds = kwargs['wait_time_between_faults_in_seconds'] - self.move_replica_fault_enabled = kwargs['move_replica_fault_enabled'] - self.included_node_type_list = kwargs['included_node_type_list'] - self.included_application_list = kwargs['included_application_list'] - self.cluster_health_policy = kwargs['cluster_health_policy'] - self.chaos_context = kwargs['chaos_context'] + self.max_concurrent_faults = kwargs.get('max_concurrent_faults', None) + self.time_to_run_in_seconds = kwargs.get('time_to_run_in_seconds', None) + self.max_cluster_stabilization_timeout_in_seconds = kwargs.get('max_cluster_stabilization_timeout_in_seconds', None) + self.wait_time_between_iterations_in_seconds = kwargs.get('wait_time_between_iterations_in_seconds', None) + self.wait_time_between_faults_in_seconds = kwargs.get('wait_time_between_faults_in_seconds', None) + self.move_replica_fault_enabled = kwargs.get('move_replica_fault_enabled', None) + self.included_node_type_list = kwargs.get('included_node_type_list', None) + self.included_application_list = kwargs.get('included_application_list', None) + self.cluster_health_policy = kwargs.get('cluster_health_policy', None) + self.chaos_context = kwargs.get('chaos_context', None) + self.kind = 'ChaosStarted' class ChaosStoppedEvent(ClusterEvent): @@ -5600,100 +5091,96 @@ class ChaosStoppedEvent(ClusterEvent): All 
required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param reason: Required. Describes reason. :type reason: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'reason': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosStoppedEvent, self).__init__(**kwargs) - self.kind = 'ChaosStopped' # type: str - self.reason = kwargs['reason'] - - -class ChaosTargetFilter(msrest.serialization.Model): - """Defines all filters for targeted Chaos faults, for example, faulting only certain node types or faulting only certain applications. -If ChaosTargetFilter is not used, Chaos faults all cluster entities. If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter -specification. 
NodeTypeInclusionList and ApplicationInclusionList allow a union semantics only. It is not possible to specify an intersection -of NodeTypeInclusionList and ApplicationInclusionList. For example, it is not possible to specify "fault this application only when it is on that node type." -Once an entity is included in either NodeTypeInclusionList or ApplicationInclusionList, that entity cannot be excluded using ChaosTargetFilter. Even if -applicationX does not appear in ApplicationInclusionList, in some Chaos iteration applicationX can be faulted because it happens to be on a node of nodeTypeY that is included -in NodeTypeInclusionList. If both NodeTypeInclusionList and ApplicationInclusionList are null or empty, an ArgumentException is thrown. - - :param node_type_inclusion_list: A list of node types to include in Chaos faults. - All types of faults (restart node, restart code package, remove replica, restart replica, move - primary, and move secondary) are enabled for the nodes of these node types. - If a node type (say NodeTypeX) does not appear in the NodeTypeInclusionList, then node level - faults (like NodeRestart) will never be enabled for the nodes of - NodeTypeX, but code package and replica faults can still be enabled for NodeTypeX if an - application in the ApplicationInclusionList. + self.reason = kwargs.get('reason', None) + self.kind = 'ChaosStopped' + + +class ChaosTargetFilter(Model): + """Defines all filters for targeted Chaos faults, for example, faulting only + certain node types or faulting only certain applications. + If ChaosTargetFilter is not used, Chaos faults all cluster entities. If + ChaosTargetFilter is used, Chaos faults only the entities that meet the + ChaosTargetFilter + specification. NodeTypeInclusionList and ApplicationInclusionList allow a + union semantics only. It is not possible to specify an intersection + of NodeTypeInclusionList and ApplicationInclusionList. 
For example, it is + not possible to specify "fault this application only when it is on that + node type." + Once an entity is included in either NodeTypeInclusionList or + ApplicationInclusionList, that entity cannot be excluded using + ChaosTargetFilter. Even if + applicationX does not appear in ApplicationInclusionList, in some Chaos + iteration applicationX can be faulted because it happens to be on a node of + nodeTypeY that is included + in NodeTypeInclusionList. If both NodeTypeInclusionList and + ApplicationInclusionList are null or empty, an ArgumentException is thrown. + + :param node_type_inclusion_list: A list of node types to include in Chaos + faults. + All types of faults (restart node, restart code package, remove replica, + restart replica, move primary, and move secondary) are enabled for the + nodes of these node types. + If a node type (say NodeTypeX) does not appear in the + NodeTypeInclusionList, then node level faults (like NodeRestart) will + never be enabled for the nodes of + NodeTypeX, but code package and replica faults can still be enabled for + NodeTypeX if an application in the ApplicationInclusionList. happens to reside on a node of NodeTypeX. - At most 100 node type names can be included in this list, to increase this number, a config - upgrade is required for MaxNumberOfNodeTypesInChaosEntityFilter configuration. + At most 100 node type names can be included in this list, to increase this + number, a config upgrade is required for + MaxNumberOfNodeTypesInChaosEntityFilter configuration. :type node_type_inclusion_list: list[str] - :param application_inclusion_list: A list of application URIs to include in Chaos faults. - All replicas belonging to services of these applications are amenable to replica faults - (restart replica, remove replica, move primary, and move secondary) by Chaos. - Chaos may restart a code package only if the code package hosts replicas of these applications - only. 
- If an application does not appear in this list, it can still be faulted in some Chaos - iteration if the application ends up on a node of a node type that is included in - NodeTypeInclusionList. - However, if applicationX is tied to nodeTypeY through placement constraints and applicationX - is absent from ApplicationInclusionList and nodeTypeY is absent from NodeTypeInclusionList, - then applicationX will never be faulted. - At most 1000 application names can be included in this list, to increase this number, a config - upgrade is required for MaxNumberOfApplicationsInChaosEntityFilter configuration. + :param application_inclusion_list: A list of application URIs to include + in Chaos faults. + All replicas belonging to services of these applications are amenable to + replica faults (restart replica, remove replica, move primary, and move + secondary) by Chaos. + Chaos may restart a code package only if the code package hosts replicas + of these applications only. + If an application does not appear in this list, it can still be faulted in + some Chaos iteration if the application ends up on a node of a node type + that is included in NodeTypeInclusionList. + However, if applicationX is tied to nodeTypeY through placement + constraints and applicationX is absent from ApplicationInclusionList and + nodeTypeY is absent from NodeTypeInclusionList, then applicationX will + never be faulted. + At most 1000 application names can be included in this list, to increase + this number, a config upgrade is required for + MaxNumberOfApplicationsInChaosEntityFilter configuration. 
:type application_inclusion_list: list[str] """ @@ -5702,180 +5189,170 @@ class ChaosTargetFilter(msrest.serialization.Model): 'application_inclusion_list': {'key': 'ApplicationInclusionList', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ChaosTargetFilter, self).__init__(**kwargs) self.node_type_inclusion_list = kwargs.get('node_type_inclusion_list', None) self.application_inclusion_list = kwargs.get('application_inclusion_list', None) -class PropertyBatchOperation(msrest.serialization.Model): - """Represents the base type for property operations that can be put into a batch and submitted. +class PropertyBatchOperation(Model): + """Represents the base type for property operations that can be put into a + batch and submitted. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CheckExistsPropertyBatchOperation, CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, DeletePropertyBatchOperation, GetPropertyBatchOperation, PutPropertyBatchOperation. + sub-classes are: CheckExistsPropertyBatchOperation, + CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, + DeletePropertyBatchOperation, GetPropertyBatchOperation, + PutPropertyBatchOperation All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyBatchOperation, self).__init__(**kwargs) - self.kind = None # type: Optional[str] - self.property_name = kwargs['property_name'] + self.property_name = kwargs.get('property_name', None) + self.kind = None class CheckExistsPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the Boolean existence of a property with the Exists argument. -The PropertyBatchOperation operation fails if the property's existence is not equal to the Exists argument. -The CheckExistsPropertyBatchOperation is generally used as a precondition for the write operations in the batch. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the Boolean existence of + a property with the Exists argument. + The PropertyBatchOperation operation fails if the property's existence is + not equal to the Exists argument. + The CheckExistsPropertyBatchOperation is generally used as a precondition + for the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. 
- :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param exists: Required. Whether or not the property should exist for the operation to pass. + :param kind: Required. Constant filled by server. + :type kind: str + :param exists: Required. Whether or not the property should exist for the + operation to pass. :type exists: bool """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'exists': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'exists': {'key': 'Exists', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CheckExistsPropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'CheckExists' # type: str - self.exists = kwargs['exists'] + self.exists = kwargs.get('exists', None) + self.kind = 'CheckExists' class CheckSequencePropertyBatchOperation(PropertyBatchOperation): - """Compares the Sequence Number of a property with the SequenceNumber argument. -A property's sequence number can be thought of as that property's version. -Every time the property is modified, its sequence number is increased. -The sequence number can be found in a property's metadata. -The comparison fails if the sequence numbers are not equal. -CheckSequencePropertyBatchOperation is generally used as a precondition for the write operations in the batch. 
-Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Compares the Sequence Number of a property with the SequenceNumber + argument. + A property's sequence number can be thought of as that property's version. + Every time the property is modified, its sequence number is increased. + The sequence number can be found in a property's metadata. + The comparison fails if the sequence numbers are not equal. + CheckSequencePropertyBatchOperation is generally used as a precondition for + the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str :param sequence_number: Required. The expected sequence number. 
:type sequence_number: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'sequence_number': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CheckSequencePropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'CheckSequence' # type: str - self.sequence_number = kwargs['sequence_number'] + self.sequence_number = kwargs.get('sequence_number', None) + self.kind = 'CheckSequence' class CheckValuePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the value of the property with the expected value. -The CheckValuePropertyBatchOperation is generally used as a precondition for the write operations in the batch. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the value of the property + with the expected value. + The CheckValuePropertyBatchOperation is generally used as a precondition + for the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. 
The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str :param value: Required. The expected property value. :type value: ~azure.servicefabric.models.PropertyValue """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CheckValuePropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'CheckValue' # type: str - self.value = kwargs['value'] + self.value = kwargs.get('value', None) + self.kind = 'CheckValue' -class ClusterConfiguration(msrest.serialization.Model): +class ClusterConfiguration(Model): """Information about the standalone cluster configuration. - :param cluster_configuration: The contents of the cluster configuration file. + :param cluster_configuration: The contents of the cluster configuration + file. :type cluster_configuration: str """ @@ -5883,54 +5360,62 @@ class ClusterConfiguration(msrest.serialization.Model): 'cluster_configuration': {'key': 'ClusterConfiguration', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterConfiguration, self).__init__(**kwargs) self.cluster_configuration = kwargs.get('cluster_configuration', None) -class ClusterConfigurationUpgradeDescription(msrest.serialization.Model): +class ClusterConfigurationUpgradeDescription(Model): """Describes the parameters for a standalone cluster configuration upgrade. All required parameters must be populated in order to send to Azure. - :param cluster_config: Required. The cluster configuration as a JSON string. 
For example, `this - file - `_ - contains JSON describing the `nodes and other properties of the cluster - `_. + :param cluster_config: Required. The cluster configuration as a JSON + string. For example, [this + file](https://github.com/Azure-Samples/service-fabric-dotnet-standalone-cluster-configuration/blob/master/Samples/ClusterConfig.Unsecure.DevCluster.json) + contains JSON describing the [nodes and other properties of the + cluster](https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-manifest). :type cluster_config: str - :param health_check_retry_timeout: The length of time between attempts to perform health checks - if the application or cluster is not healthy. - :type health_check_retry_timeout: ~datetime.timedelta - :param health_check_wait_duration_in_seconds: The length of time to wait after completing an - upgrade domain before starting the health checks process. - :type health_check_wait_duration_in_seconds: ~datetime.timedelta - :param health_check_stable_duration_in_seconds: The length of time that the application or - cluster must remain healthy before the upgrade proceeds to the next upgrade domain. - :type health_check_stable_duration_in_seconds: ~datetime.timedelta - :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade domain. - :type upgrade_domain_timeout_in_seconds: ~datetime.timedelta - :param upgrade_timeout_in_seconds: The upgrade timeout. - :type upgrade_timeout_in_seconds: ~datetime.timedelta - :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy - applications during the upgrade. Allowed values are integer values from zero to 100. + :param health_check_retry_timeout: The length of time between attempts to + perform health checks if the application or cluster is not healthy. + Default value: "PT0H0M0S" . 
+ :type health_check_retry_timeout: timedelta + :param health_check_wait_duration_in_seconds: The length of time to wait + after completing an upgrade domain before starting the health checks + process. Default value: "PT0H0M0S" . + :type health_check_wait_duration_in_seconds: timedelta + :param health_check_stable_duration_in_seconds: The length of time that + the application or cluster must remain healthy before the upgrade proceeds + to the next upgrade domain. Default value: "PT0H0M0S" . + :type health_check_stable_duration_in_seconds: timedelta + :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade + domain. Default value: "PT0H0M0S" . + :type upgrade_domain_timeout_in_seconds: timedelta + :param upgrade_timeout_in_seconds: The upgrade timeout. Default value: + "PT0H0M0S" . + :type upgrade_timeout_in_seconds: timedelta + :param max_percent_unhealthy_applications: The maximum allowed percentage + of unhealthy applications during the upgrade. Allowed values are integer + values from zero to 100. Default value: 0 . :type max_percent_unhealthy_applications: int - :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes during - the upgrade. Allowed values are integer values from zero to 100. + :param max_percent_unhealthy_nodes: The maximum allowed percentage of + unhealthy nodes during the upgrade. Allowed values are integer values from + zero to 100. Default value: 0 . :type max_percent_unhealthy_nodes: int - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of delta health - degradation during the upgrade. Allowed values are integer values from zero to 100. + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage + of delta health degradation during the upgrade. Allowed values are integer + values from zero to 100. Default value: 0 . 
:type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of - upgrade domain delta health degradation during the upgrade. Allowed values are integer values - from zero to 100. + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum + allowed percentage of upgrade domain delta health degradation during the + upgrade. Allowed values are integer values from zero to 100. Default + value: 0 . :type max_percent_upgrade_domain_delta_unhealthy_nodes: int - :param application_health_policies: Defines the application health policy map used to evaluate - the health of an application or one of its children entities. - :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policies: Defines the application health policy + map used to evaluate the health of an application or one of its children + entities. + :type application_health_policies: + ~azure.servicefabric.models.ApplicationHealthPolicies """ _validation = { @@ -5951,12 +5436,9 @@ class ClusterConfigurationUpgradeDescription(msrest.serialization.Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterConfigurationUpgradeDescription, self).__init__(**kwargs) - self.cluster_config = kwargs['cluster_config'] + self.cluster_config = kwargs.get('cluster_config', None) self.health_check_retry_timeout = kwargs.get('health_check_retry_timeout', "PT0H0M0S") self.health_check_wait_duration_in_seconds = kwargs.get('health_check_wait_duration_in_seconds', "PT0H0M0S") self.health_check_stable_duration_in_seconds = kwargs.get('health_check_stable_duration_in_seconds', "PT0H0M0S") @@ -5969,12 +5451,13 @@ def __init__( self.application_health_policies = kwargs.get('application_health_policies', None) -class 
ClusterConfigurationUpgradeStatusInfo(msrest.serialization.Model): +class ClusterConfigurationUpgradeStatusInfo(Model): """Information about a standalone cluster configuration upgrade status. - :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", - "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", - "RollingForwardInProgress", "RollingForwardCompleted", "Failed". + :param upgrade_state: The state of the upgrade domain. Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState :param progress_status: The cluster manifest version. :type progress_status: int @@ -5991,10 +5474,7 @@ class ClusterConfigurationUpgradeStatusInfo(msrest.serialization.Model): 'details': {'key': 'Details', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterConfigurationUpgradeStatusInfo, self).__init__(**kwargs) self.upgrade_state = kwargs.get('upgrade_state', None) self.progress_status = kwargs.get('progress_status', None) @@ -6004,28 +5484,35 @@ def __init__( class ClusterHealth(EntityHealth): """Represents the health of the cluster. -Contains the cluster aggregated health state, the cluster application and node health states as well as the health events and the unhealthy evaluations. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + Contains the cluster aggregated health state, the cluster application and + node health states as well as the health events and the unhealthy + evaluations. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param node_health_states: Cluster node health states as found in the health store. - :type node_health_states: list[~azure.servicefabric.models.NodeHealthState] - :param application_health_states: Cluster application health states as found in the health - store. 
- :type application_health_states: list[~azure.servicefabric.models.ApplicationHealthState] + :param node_health_states: Cluster node health states as found in the + health store. + :type node_health_states: + list[~azure.servicefabric.models.NodeHealthState] + :param application_health_states: Cluster application health states as + found in the health store. + :type application_health_states: + list[~azure.servicefabric.models.ApplicationHealthState] """ _attribute_map = { @@ -6037,31 +5524,33 @@ class ClusterHealth(EntityHealth): 'application_health_states': {'key': 'ApplicationHealthStates', 'type': '[ApplicationHealthState]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterHealth, self).__init__(**kwargs) self.node_health_states = kwargs.get('node_health_states', None) self.application_health_states = kwargs.get('application_health_states', None) -class ClusterHealthChunk(msrest.serialization.Model): +class ClusterHealthChunk(Model): """Represents the health chunk of the cluster. -Contains the cluster aggregated health state, and the cluster entities that respect the input filter. - - :param health_state: The HealthState representing the aggregated health state of the cluster - computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired cluster health policy and the application - health policies. Possible values include: "Invalid", "Ok", "Warning", "Error", "Unknown". + Contains the cluster aggregated health state, and the cluster entities that + respect the input filter. + + :param health_state: The HealthState representing the aggregated health + state of the cluster computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). 
+ The aggregation is done by applying the desired cluster health policy and + the application health policies. Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param node_health_state_chunks: The list of node health state chunks in the cluster that - respect the filters in the cluster health chunk query description. - :type node_health_state_chunks: ~azure.servicefabric.models.NodeHealthStateChunkList - :param application_health_state_chunks: The list of application health state chunks in the - cluster that respect the filters in the cluster health chunk query description. + :param node_health_state_chunks: The list of node health state chunks in + the cluster that respect the filters in the cluster health chunk query + description. + :type node_health_state_chunks: + ~azure.servicefabric.models.NodeHealthStateChunkList + :param application_health_state_chunks: The list of application health + state chunks in the cluster that respect the filters in the cluster health + chunk query description. 
:type application_health_state_chunks: ~azure.servicefabric.models.ApplicationHealthStateChunkList """ @@ -6072,41 +5561,49 @@ class ClusterHealthChunk(msrest.serialization.Model): 'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterHealthChunk, self).__init__(**kwargs) self.health_state = kwargs.get('health_state', None) self.node_health_state_chunks = kwargs.get('node_health_state_chunks', None) self.application_health_state_chunks = kwargs.get('application_health_state_chunks', None) -class ClusterHealthChunkQueryDescription(msrest.serialization.Model): - """The cluster health chunk query description, which can specify the health policies to evaluate cluster health and very expressive filters to select which cluster entities to include in response. +class ClusterHealthChunkQueryDescription(Model): + """The cluster health chunk query description, which can specify the health + policies to evaluate cluster health and very expressive filters to select + which cluster entities to include in response. - :param node_filters: Defines a list of filters that specify which nodes to be included in the - returned cluster health chunk. - If no filters are specified, no nodes are returned. All the nodes are used to evaluate the - cluster's aggregated health state, regardless of the input filters. + :param node_filters: Defines a list of filters that specify which nodes to + be included in the returned cluster health chunk. + If no filters are specified, no nodes are returned. All the nodes are used + to evaluate the cluster's aggregated health state, regardless of the input + filters. The cluster health chunk query may specify multiple node filters. - For example, it can specify a filter to return all nodes with health state Error and another - filter to always include a node identified by its NodeName. 
- :type node_filters: list[~azure.servicefabric.models.NodeHealthStateFilter] - :param application_filters: Defines a list of filters that specify which applications to be - included in the returned cluster health chunk. - If no filters are specified, no applications are returned. All the applications are used to - evaluate the cluster's aggregated health state, regardless of the input filters. + For example, it can specify a filter to return all nodes with health state + Error and another filter to always include a node identified by its + NodeName. + :type node_filters: + list[~azure.servicefabric.models.NodeHealthStateFilter] + :param application_filters: Defines a list of filters that specify which + applications to be included in the returned cluster health chunk. + If no filters are specified, no applications are returned. All the + applications are used to evaluate the cluster's aggregated health state, + regardless of the input filters. The cluster health chunk query may specify multiple application filters. - For example, it can specify a filter to return all applications with health state Error and - another filter to always include applications of a specified application type. - :type application_filters: list[~azure.servicefabric.models.ApplicationHealthStateFilter] - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param application_health_policies: Defines the application health policy map used to evaluate - the health of an application or one of its children entities. - :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies + For example, it can specify a filter to return all applications with + health state Error and another filter to always include applications of a + specified application type. 
+ :type application_filters: + list[~azure.servicefabric.models.ApplicationHealthStateFilter] + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param application_health_policies: Defines the application health policy + map used to evaluate the health of an application or one of its children + entities. + :type application_health_policies: + ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -6116,10 +5613,7 @@ class ClusterHealthChunkQueryDescription(msrest.serialization.Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterHealthChunkQueryDescription, self).__init__(**kwargs) self.node_filters = kwargs.get('node_filters', None) self.application_filters = kwargs.get('application_filters', None) @@ -6127,22 +5621,24 @@ def __init__( self.application_health_policies = kwargs.get('application_health_policies', None) -class ClusterHealthPolicies(msrest.serialization.Model): +class ClusterHealthPolicies(Model): """Health policies to evaluate cluster health. - :param application_health_policy_map: Defines a map that contains specific application health - policies for different applications. - Each entry specifies as key the application name and as value an ApplicationHealthPolicy used - to evaluate the application health. - If an application is not specified in the map, the application health evaluation uses the - ApplicationHealthPolicy found in its application manifest or the default application health - policy (if no health policy is defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific + application health policies for different applications. 
+ Each entry specifies as key the application name and as value an + ApplicationHealthPolicy used to evaluate the application health. + If an application is not specified in the map, the application health + evaluation uses the ApplicationHealthPolicy found in its application + manifest or the default application health policy (if no health policy is + defined in the manifest). The map is empty by default. :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy """ _attribute_map = { @@ -6150,104 +5646,108 @@ class ClusterHealthPolicies(msrest.serialization.Model): 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) self.cluster_health_policy = kwargs.get('cluster_health_policy', None) -class ClusterHealthPolicy(msrest.serialization.Model): - """Defines a health policy used to evaluate the health of the cluster or of a cluster node. +class ClusterHealthPolicy(Model): + """Defines a health policy used to evaluate the health of the cluster or of a + cluster node. - :param consider_warning_as_error: Indicates whether warnings are treated with the same severity - as errors. + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. Default value: False . 
:type consider_warning_as_error: bool - :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes before - reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10. - - The percentage represents the maximum tolerated percentage of nodes that can be unhealthy - before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy node, the health is - evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy nodes over the total number - of nodes in the cluster. - The computation rounds up to tolerate one failure on small numbers of nodes. Default - percentage is zero. - - In large clusters, some nodes will always be down or out for repairs, so this percentage - should be configured to tolerate that. + :param max_percent_unhealthy_nodes: The maximum allowed percentage of + unhealthy nodes before reporting an error. For example, to allow 10% of + nodes to be unhealthy, this value would be 10. + The percentage represents the maximum tolerated percentage of nodes that + can be unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy node, + the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes + over the total number of nodes in the cluster. + The computation rounds up to tolerate one failure on small numbers of + nodes. Default percentage is zero. + In large clusters, some nodes will always be down or out for repairs, so + this percentage should be configured to tolerate that. Default value: 0 . :type max_percent_unhealthy_nodes: int - :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy - applications before reporting an error. For example, to allow 10% of applications to be - unhealthy, this value would be 10. 
- - The percentage represents the maximum tolerated percentage of applications that can be - unhealthy before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy application, the health is - evaluated as Warning. - This is calculated by dividing the number of unhealthy applications over the total number of - application instances in the cluster, excluding applications of application types that are - included in the ApplicationTypeHealthPolicyMap. - The computation rounds up to tolerate one failure on small numbers of applications. Default - percentage is zero. + :param max_percent_unhealthy_applications: The maximum allowed percentage + of unhealthy applications before reporting an error. For example, to allow + 10% of applications to be unhealthy, this value would be 10. + The percentage represents the maximum tolerated percentage of applications + that can be unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy + application, the health is evaluated as Warning. + This is calculated by dividing the number of unhealthy applications over + the total number of application instances in the cluster, excluding + applications of application types that are included in the + ApplicationTypeHealthPolicyMap. + The computation rounds up to tolerate one failure on small numbers of + applications. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_applications: int - :param application_type_health_policy_map: Defines a map with max percentage unhealthy - applications for specific application types. - Each entry specifies as key the application type name and as value an integer that represents - the MaxPercentUnhealthyApplications percentage used to evaluate the applications of the - specified application type. 
- - The application type health policy map can be used during cluster health evaluation to - describe special application types. - The application types included in the map are evaluated against the percentage specified in - the map, and not with the global MaxPercentUnhealthyApplications defined in the cluster health - policy. - The applications of application types specified in the map are not counted against the global - pool of applications. - For example, if some applications of a type are critical, the cluster administrator can add an - entry to the map for that application type + :param application_type_health_policy_map: Defines a map with max + percentage unhealthy applications for specific application types. + Each entry specifies as key the application type name and as value an + integer that represents the MaxPercentUnhealthyApplications percentage + used to evaluate the applications of the specified application type. + The application type health policy map can be used during cluster health + evaluation to describe special application types. + The application types included in the map are evaluated against the + percentage specified in the map, and not with the global + MaxPercentUnhealthyApplications defined in the cluster health policy. + The applications of application types specified in the map are not counted + against the global pool of applications. + For example, if some applications of a type are critical, the cluster + administrator can add an entry to the map for that application type and assign it a value of 0% (that is, do not tolerate any failures). - All other applications can be evaluated with MaxPercentUnhealthyApplications set to 20% to - tolerate some failures out of the thousands of application instances. 
- The application type health policy map is used only if the cluster manifest enables - application type health evaluation using the configuration entry for + All other applications can be evaluated with + MaxPercentUnhealthyApplications set to 20% to tolerate some failures out + of the thousands of application instances. + The application type health policy map is used only if the cluster + manifest enables application type health evaluation using the + configuration entry for HealthManager/EnableApplicationTypeHealthEvaluation. :type application_type_health_policy_map: list[~azure.servicefabric.models.ApplicationTypeHealthPolicyMapItem] - :param node_type_health_policy_map: Defines a map with max percentage unhealthy nodes for - specific node types. - Each entry specifies as key the node type name and as value an integer that represents the - MaxPercentUnhealthyNodes percentage used to evaluate the nodes of the specified node type. - - The node type health policy map can be used during cluster health evaluation to describe - special node types. - They are evaluated against the percentages associated with their node type name in the map. - Setting this has no impact on the global pool of nodes used for MaxPercentUnhealthyNodes. - The node type health policy map is used only if the cluster manifest enables node type health - evaluation using the configuration entry for HealthManager/EnableNodeTypeHealthEvaluation. - - For example, given a cluster with many nodes of different types, with important work hosted on - node type "SpecialNodeType" that should not tolerate any nodes down. - You can specify global MaxPercentUnhealthyNodes to 20% to tolerate some failures for all - nodes, but for the node type "SpecialNodeType", set the MaxPercentUnhealthyNodes to 0 by - setting the value in the key value pair in NodeTypeHealthPolicyMapItem. The key is the node - type name. 
- This way, as long as no nodes of type "SpecialNodeType" are in Error state, - even if some of the many nodes in the global pool are in Error state, but below the global - unhealthy percentage, the cluster would be evaluated to Warning. - A Warning health state does not impact cluster upgrade or other monitoring triggered by Error - health state. - But even one node of type SpecialNodeType in Error would make cluster unhealthy (in Error - rather than Warning/Ok), which triggers rollback or pauses the cluster upgrade, depending on - the upgrade configuration. - - Conversely, setting the global MaxPercentUnhealthyNodes to 0, and setting SpecialNodeType's - max percent unhealthy nodes to 100, - with one node of type SpecialNodeType in Error state would still put the cluster in an Error - state, since the global restriction is more strict in this case. + :param node_type_health_policy_map: Defines a map with max percentage + unhealthy nodes for specific node types. + Each entry specifies as key the node type name and as value an integer + that represents the MaxPercentUnhealthyNodes percentage used to evaluate + the nodes of the specified node type. + The node type health policy map can be used during cluster health + evaluation to describe special node types. + They are evaluated against the percentages associated with their node type + name in the map. + Setting this has no impact on the global pool of nodes used for + MaxPercentUnhealthyNodes. + The node type health policy map is used only if the cluster manifest + enables node type health evaluation using the configuration entry for + HealthManager/EnableNodeTypeHealthEvaluation. + For example, given a cluster with many nodes of different types, with + important work hosted on node type "SpecialNodeType" that should not + tolerate any nodes down. 
+ You can specify global MaxPercentUnhealthyNodes to 20% to tolerate some + failures for all nodes, but for the node type "SpecialNodeType", set the + MaxPercentUnhealthyNodes to 0 by + setting the value in the key value pair in NodeTypeHealthPolicyMapItem. + The key is the node type name. + This way, as long as no nodes of type "SpecialNodeType" are in Error + state, + even if some of the many nodes in the global pool are in Error state, but + below the global unhealthy percentage, the cluster would be evaluated to + Warning. + A Warning health state does not impact cluster upgrade or other monitoring + triggered by Error health state. + But even one node of type SpecialNodeType in Error would make cluster + unhealthy (in Error rather than Warning/Ok), which triggers rollback or + pauses the cluster upgrade, depending on the upgrade configuration. + Conversely, setting the global MaxPercentUnhealthyNodes to 0, and setting + SpecialNodeType's max percent unhealthy nodes to 100, + with one node of type SpecialNodeType in Error state would still put the + cluster in an Error state, since the global restriction is more strict in + this case. :type node_type_health_policy_map: list[~azure.servicefabric.models.NodeTypeHealthPolicyMapItem] """ @@ -6260,10 +5760,7 @@ class ClusterHealthPolicy(msrest.serialization.Model): 'node_type_health_policy_map': {'key': 'NodeTypeHealthPolicyMap', 'type': '[NodeTypeHealthPolicyMapItem]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False) self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', 0) @@ -6277,38 +5774,18 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -6321,16 +5798,17 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -6342,11 +5820,11 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -6357,32 +5835,33 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'ClusterHealthReportExpired' # type: str - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] - - -class ClusterLoadInfo(msrest.serialization.Model): - """Information about load in a Service Fabric cluster. It holds a summary of all metrics and their load in a cluster. - - :param last_balancing_start_time_utc: The starting time of last resource balancing run. 
- :type last_balancing_start_time_utc: ~datetime.datetime - :param last_balancing_end_time_utc: The end time of last resource balancing run. - :type last_balancing_end_time_utc: ~datetime.datetime - :param load_metric_information: List that contains metrics and their load information in this - cluster. - :type load_metric_information: list[~azure.servicefabric.models.LoadMetricInformation] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ClusterHealthReportExpired' + + +class ClusterLoadInfo(Model): + """Information about load in a Service Fabric cluster. It holds a summary of + all metrics and their load in a cluster. + + :param last_balancing_start_time_utc: The starting time of last resource + balancing run. + :type last_balancing_start_time_utc: datetime + :param last_balancing_end_time_utc: The end time of last resource + balancing run. + :type last_balancing_end_time_utc: datetime + :param load_metric_information: List that contains metrics and their load + information in this cluster. 
+ :type load_metric_information: + list[~azure.servicefabric.models.LoadMetricInformation] """ _attribute_map = { @@ -6391,17 +5870,14 @@ class ClusterLoadInfo(msrest.serialization.Model): 'load_metric_information': {'key': 'LoadMetricInformation', 'type': '[LoadMetricInformation]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterLoadInfo, self).__init__(**kwargs) self.last_balancing_start_time_utc = kwargs.get('last_balancing_start_time_utc', None) self.last_balancing_end_time_utc = kwargs.get('last_balancing_end_time_utc', None) self.load_metric_information = kwargs.get('load_metric_information', None) -class ClusterManifest(msrest.serialization.Model): +class ClusterManifest(Model): """Information about the cluster manifest. :param manifest: The contents of the cluster manifest file. @@ -6412,10 +5888,7 @@ class ClusterManifest(msrest.serialization.Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterManifest, self).__init__(**kwargs) self.manifest = kwargs.get('manifest', None) @@ -6425,38 +5898,18 @@ class ClusterNewHealthReportEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -6469,16 +5922,17 @@ class ClusterNewHealthReportEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -6490,11 +5944,11 @@ class ClusterNewHealthReportEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -6505,20 +5959,17 @@ class ClusterNewHealthReportEvent(ClusterEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'ClusterNewHealthReport' # type: str - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = 
kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ClusterNewHealthReport' class ClusterUpgradeCompletedEvent(ClusterEvent): @@ -6526,115 +5977,102 @@ class ClusterUpgradeCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - 
"ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in - milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeCompletedEvent, self).__init__(**kwargs) - self.kind = 'ClusterUpgradeCompleted' # type: str - self.target_cluster_version = kwargs['target_cluster_version'] - self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ClusterUpgradeCompleted' -class ClusterUpgradeDescriptionObject(msrest.serialization.Model): +class ClusterUpgradeDescriptionObject(Model): """Represents a ServiceFabric cluster upgrade. - :param config_version: The cluster configuration version (specified in the cluster manifest). + :param config_version: The cluster configuration version (specified in the + cluster manifest). :type config_version: str :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling". 
Default value: "Rolling". + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). 
:type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible - values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", - "ReverseLexicographical". Default value: "Default". + :param sort_order: Defines the order in which an upgrade proceeds through + the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', + 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default + value: "Default" . :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than - absolute health evaluation after completion of each upgrade domain. + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. :type enable_delta_health_evaluation: bool - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of - the cluster during a cluster upgrade. + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. 
+ :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Represents the map of application health policies for a - ServiceFabric cluster upgrade. + :param application_health_policy_map: Represents the map of application + health policies for a ServiceFabric cluster upgrade :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicyMapObject """ @@ -6654,17 +6092,14 @@ class ClusterUpgradeDescriptionObject(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicyMapObject'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeDescriptionObject, self).__init__(**kwargs) self.config_version = kwargs.get('config_version', None) self.code_version = kwargs.get('code_version', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) - self.force_restart = kwargs.get('force_restart', False) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) self.sort_order = kwargs.get('sort_order', "Default") self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) 
self.monitoring_policy = kwargs.get('monitoring_policy', None) @@ -6678,53 +6113,33 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", 
"ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain upgrade in - milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain + upgrade in milli-seconds. 
:type upgrade_domain_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_state': {'required': True}, 'upgrade_domains': {'required': True}, @@ -6732,44 +6147,46 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, 'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'}, 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeDomainCompletedEvent, self).__init__(**kwargs) - self.kind = 'ClusterUpgradeDomainCompleted' # type: str - self.target_cluster_version = kwargs['target_cluster_version'] - self.upgrade_state = kwargs['upgrade_state'] - self.upgrade_domains = kwargs['upgrade_domains'] - self.upgrade_domain_elapsed_time_in_ms = kwargs['upgrade_domain_elapsed_time_in_ms'] + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.upgrade_state = kwargs.get('upgrade_state', None) + self.upgrade_domains = kwargs.get('upgrade_domains', None) + self.upgrade_domain_elapsed_time_in_ms = kwargs.get('upgrade_domain_elapsed_time_in_ms', None) + self.kind = 'ClusterUpgradeDomainCompleted' -class ClusterUpgradeHealthPolicyObject(msrest.serialization.Model): - """Defines a health policy used to evaluate the health of the cluster during a cluster upgrade. 
+class ClusterUpgradeHealthPolicyObject(Model): + """Defines a health policy used to evaluate the health of the cluster during a + cluster upgrade. - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of nodes health - degradation allowed during cluster upgrades. The delta is measured between the state of the - nodes at the beginning of upgrade and the state of the nodes at the time of the health - evaluation. The check is performed after every upgrade domain upgrade completion to make sure - the global state of the cluster is within tolerated limits. The default value is 10%. + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage + of nodes health degradation allowed during cluster upgrades. The delta is + measured between the state of the nodes at the beginning of upgrade and + the state of the nodes at the time of the health evaluation. The check is + performed after every upgrade domain upgrade completion to make sure the + global state of the cluster is within tolerated limits. The default value + is 10%. :type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of - upgrade domain nodes health degradation allowed during cluster upgrades. The delta is measured - between the state of the upgrade domain nodes at the beginning of upgrade and the state of the - upgrade domain nodes at the time of the health evaluation. The check is performed after every - upgrade domain upgrade completion for all completed upgrade domains to make sure the state of - the upgrade domains is within tolerated limits. The default value is 15%. + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum + allowed percentage of upgrade domain nodes health degradation allowed + during cluster upgrades. 
The delta is measured between the state of the + upgrade domain nodes at the beginning of upgrade and the state of the + upgrade domain nodes at the time of the health evaluation. The check is + performed after every upgrade domain upgrade completion for all completed + upgrade domains to make sure the state of the upgrade domains is within + tolerated limits. The default value is 15%. :type max_percent_upgrade_domain_delta_unhealthy_nodes: int """ @@ -6783,60 +6200,63 @@ class ClusterUpgradeHealthPolicyObject(msrest.serialization.Model): 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeHealthPolicyObject, self).__init__(**kwargs) self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) self.max_percent_upgrade_domain_delta_unhealthy_nodes = kwargs.get('max_percent_upgrade_domain_delta_unhealthy_nodes', None) -class ClusterUpgradeProgressObject(msrest.serialization.Model): +class ClusterUpgradeProgressObject(Model): """Information about a cluster upgrade. :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param config_version: The cluster configuration version (specified in the cluster manifest). + :param config_version: The cluster configuration version (specified in the + cluster manifest). :type config_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", - "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", - "RollingForwardInProgress", "RollingForwardCompleted", "Failed". + :param upgrade_state: The state of the upgrade domain. 
Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be processed. + :param next_upgrade_domain: The name of the next upgrade domain to be + processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Represents a ServiceFabric cluster upgrade. - :type upgrade_description: ~azure.servicefabric.models.ClusterUpgradeDescriptionObject - :param upgrade_duration_in_milliseconds: The estimated elapsed time spent processing the - current overall upgrade. + :param upgrade_description: Represents a ServiceFabric cluster upgrade + :type upgrade_description: + ~azure.servicefabric.models.ClusterUpgradeDescriptionObject + :param upgrade_duration_in_milliseconds: The estimated elapsed time spent + processing the current overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time spent processing the - current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time + spent processing the current upgrade domain. 
:type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in the current - aggregated health state. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current in-progress upgrade - domain. + :param unhealthy_evaluations: List of health evaluations that resulted in + the current aggregated health state. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo :param start_timestamp_utc: The start time of the upgrade in UTC. :type start_timestamp_utc: str :param failure_timestamp_utc: The failure time of the upgrade in UTC. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being - executed. Possible values include: "None", "Interrupted", "HealthCheck", - "UpgradeDomainTimeout", "OverallUpgradeTimeout". + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: The detailed upgrade progress for nodes in the - current upgrade domain at the point of failure. + :param upgrade_domain_progress_at_failure: The detailed upgrade progress + for nodes in the current upgrade domain at the point of failure. 
:type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailedUpgradeDomainProgressObject """ @@ -6859,10 +6279,7 @@ class ClusterUpgradeProgressObject(msrest.serialization.Model): 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailedUpgradeDomainProgressObject'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeProgressObject, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) self.config_version = kwargs.get('config_version', None) @@ -6886,76 +6303,53 @@ class ClusterUpgradeRollbackCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in - milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeRollbackCompletedEvent, self).__init__(**kwargs) - self.kind = 'ClusterUpgradeRollbackCompleted' # type: str - self.target_cluster_version = kwargs['target_cluster_version'] - self.failure_reason = kwargs['failure_reason'] - self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ClusterUpgradeRollbackCompleted' class ClusterUpgradeRollbackStartedEvent(ClusterEvent): @@ -6963,76 +6357,53 @@ class ClusterUpgradeRollbackStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in - milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeRollbackStartedEvent, self).__init__(**kwargs) - self.kind = 
'ClusterUpgradeRollbackStarted' # type: str - self.target_cluster_version = kwargs['target_cluster_version'] - self.failure_reason = kwargs['failure_reason'] - self.overall_upgrade_elapsed_time_in_ms = kwargs['overall_upgrade_elapsed_time_in_ms'] + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ClusterUpgradeRollbackStarted' class ClusterUpgradeStartedEvent(ClusterEvent): @@ -7040,38 +6411,18 @@ class ClusterUpgradeStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param current_cluster_version: Required. Current Cluster version. :type current_cluster_version: str :param target_cluster_version: Required. Target Cluster version. 
@@ -7085,9 +6436,9 @@ class ClusterUpgradeStartedEvent(ClusterEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'current_cluster_version': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_type': {'required': True}, @@ -7096,11 +6447,11 @@ class ClusterUpgradeStartedEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'current_cluster_version': {'key': 'CurrentClusterVersion', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_type': {'key': 'UpgradeType', 'type': 'str'}, @@ -7108,20 +6459,17 @@ class ClusterUpgradeStartedEvent(ClusterEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterUpgradeStartedEvent, self).__init__(**kwargs) - self.kind = 'ClusterUpgradeStarted' # type: str - self.current_cluster_version = kwargs['current_cluster_version'] - self.target_cluster_version = kwargs['target_cluster_version'] - self.upgrade_type = kwargs['upgrade_type'] - self.rolling_upgrade_mode = kwargs['rolling_upgrade_mode'] - self.failure_action = kwargs['failure_action'] + self.current_cluster_version = kwargs.get('current_cluster_version', None) + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.upgrade_type = kwargs.get('upgrade_type', None) + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', None) + self.failure_action = kwargs.get('failure_action', None) + self.kind = 'ClusterUpgradeStarted' -class 
ClusterVersion(msrest.serialization.Model): +class ClusterVersion(Model): """The cluster version. :param version: The Service Fabric cluster runtime version. @@ -7132,37 +6480,38 @@ class ClusterVersion(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ClusterVersion, self).__init__(**kwargs) self.version = kwargs.get('version', None) -class CodePackageEntryPoint(msrest.serialization.Model): - """Information about setup or main entry point of a code package deployed on a Service Fabric node. +class CodePackageEntryPoint(Model): + """Information about setup or main entry point of a code package deployed on a + Service Fabric node. - :param entry_point_location: The location of entry point executable on the node. + :param entry_point_location: The location of entry point executable on the + node. :type entry_point_location: str :param process_id: The process ID of the entry point. :type process_id: str - :param run_as_user_name: The user name under which entry point executable is run on the node. + :param run_as_user_name: The user name under which entry point executable + is run on the node. :type run_as_user_name: str - :param code_package_entry_point_statistics: Statistics about setup or main entry point of a - code package deployed on a Service Fabric node. + :param code_package_entry_point_statistics: Statistics about setup or main + entry point of a code package deployed on a Service Fabric node. :type code_package_entry_point_statistics: ~azure.servicefabric.models.CodePackageEntryPointStatistics - :param status: Specifies the status of the code package entry point deployed on a Service - Fabric node. Possible values include: "Invalid", "Pending", "Starting", "Started", "Stopping", - "Stopped". + :param status: Specifies the status of the code package entry point + deployed on a Service Fabric node. 
Possible values include: 'Invalid', + 'Pending', 'Starting', 'Started', 'Stopping', 'Stopped' :type status: str or ~azure.servicefabric.models.EntryPointStatus - :param next_activation_time: The time (in UTC) when the entry point executable will be run - next. - :type next_activation_time: ~datetime.datetime - :param instance_id: The instance ID for current running entry point. For a code package setup - entry point (if specified) runs first and after it finishes main entry point is started. Each - time entry point executable is run, its instance id will change. + :param next_activation_time: The time (in UTC) when the entry point + executable will be run next. + :type next_activation_time: datetime + :param instance_id: The instance ID for current running entry point. For a + code package setup entry point (if specified) runs first and after it + finishes main entry point is started. Each time entry point executable is + run, its instance id will change. :type instance_id: str """ @@ -7176,10 +6525,7 @@ class CodePackageEntryPoint(msrest.serialization.Model): 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CodePackageEntryPoint, self).__init__(**kwargs) self.entry_point_location = kwargs.get('entry_point_location', None) self.process_id = kwargs.get('process_id', None) @@ -7190,35 +6536,39 @@ def __init__( self.instance_id = kwargs.get('instance_id', None) -class CodePackageEntryPointStatistics(msrest.serialization.Model): - """Statistics about setup or main entry point of a code package deployed on a Service Fabric node. +class CodePackageEntryPointStatistics(Model): + """Statistics about setup or main entry point of a code package deployed on a + Service Fabric node. :param last_exit_code: The last exit code of the entry point. :type last_exit_code: str - :param last_activation_time: The last time (in UTC) when Service Fabric attempted to run the - entry point. 
- :type last_activation_time: ~datetime.datetime - :param last_exit_time: The last time (in UTC) when the entry point finished running. - :type last_exit_time: ~datetime.datetime - :param last_successful_activation_time: The last time (in UTC) when the entry point ran - successfully. - :type last_successful_activation_time: ~datetime.datetime - :param last_successful_exit_time: The last time (in UTC) when the entry point finished running - gracefully. - :type last_successful_exit_time: ~datetime.datetime + :param last_activation_time: The last time (in UTC) when Service Fabric + attempted to run the entry point. + :type last_activation_time: datetime + :param last_exit_time: The last time (in UTC) when the entry point + finished running. + :type last_exit_time: datetime + :param last_successful_activation_time: The last time (in UTC) when the + entry point ran successfully. + :type last_successful_activation_time: datetime + :param last_successful_exit_time: The last time (in UTC) when the entry + point finished running gracefully. + :type last_successful_exit_time: datetime :param activation_count: Number of times the entry point has run. :type activation_count: str - :param activation_failure_count: Number of times the entry point failed to run. + :param activation_failure_count: Number of times the entry point failed to + run. :type activation_failure_count: str - :param continuous_activation_failure_count: Number of times the entry point continuously failed - to run. + :param continuous_activation_failure_count: Number of times the entry + point continuously failed to run. :type continuous_activation_failure_count: str :param exit_count: Number of times the entry point finished running. :type exit_count: str - :param exit_failure_count: Number of times the entry point failed to exit gracefully. + :param exit_failure_count: Number of times the entry point failed to exit + gracefully. 
:type exit_failure_count: str - :param continuous_exit_failure_count: Number of times the entry point continuously failed to - exit gracefully. + :param continuous_exit_failure_count: Number of times the entry point + continuously failed to exit gracefully. :type continuous_exit_failure_count: str """ @@ -7236,10 +6586,7 @@ class CodePackageEntryPointStatistics(msrest.serialization.Model): 'continuous_exit_failure_count': {'key': 'ContinuousExitFailureCount', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CodePackageEntryPointStatistics, self).__init__(**kwargs) self.last_exit_code = kwargs.get('last_exit_code', None) self.last_activation_time = kwargs.get('last_activation_time', None) @@ -7254,17 +6601,20 @@ def __init__( self.continuous_exit_failure_count = kwargs.get('continuous_exit_failure_count', None) -class ComposeDeploymentStatusInfo(msrest.serialization.Model): +class ComposeDeploymentStatusInfo(Model): """Information about a Service Fabric compose deployment. :param name: The name of the deployment. :type name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param status: The status of the compose deployment. Possible values include: "Invalid", - "Provisioning", "Creating", "Ready", "Unprovisioning", "Deleting", "Failed", "Upgrading". + :param status: The status of the compose deployment. Possible values + include: 'Invalid', 'Provisioning', 'Creating', 'Ready', 'Unprovisioning', + 'Deleting', 'Failed', 'Upgrading' :type status: str or ~azure.servicefabric.models.ComposeDeploymentStatus - :param status_details: The status details of compose deployment including failure message. + :param status_details: The status details of compose deployment including + failure message. 
:type status_details: str """ @@ -7275,10 +6625,7 @@ class ComposeDeploymentStatusInfo(msrest.serialization.Model): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ComposeDeploymentStatusInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.application_name = kwargs.get('application_name', None) @@ -7286,40 +6633,48 @@ def __init__( self.status_details = kwargs.get('status_details', None) -class ComposeDeploymentUpgradeDescription(msrest.serialization.Model): +class ComposeDeploymentUpgradeDescription(Model): """Describes the parameters for a compose deployment upgrade. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file that describes the - deployment to create. + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container registry. + :param registry_credential: Credential information to connect to container + registry. :type registry_credential: ~azure.servicefabric.models.RegistryCredential - :param upgrade_kind: Required. The kind of upgrade out of the following possible values. - Possible values include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. 
Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. 
- :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy """ _validation = { @@ -7340,94 +6695,105 @@ class ComposeDeploymentUpgradeDescription(msrest.serialization.Model): 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ComposeDeploymentUpgradeDescription, self).__init__(**kwargs) - self.deployment_name = kwargs['deployment_name'] - self.compose_file_content = kwargs['compose_file_content'] + self.deployment_name = kwargs.get('deployment_name', None) + self.compose_file_content = kwargs.get('compose_file_content', None) self.registry_credential = kwargs.get('registry_credential', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) - self.force_restart = kwargs.get('force_restart', False) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) self.monitoring_policy = kwargs.get('monitoring_policy', None) self.application_health_policy = 
kwargs.get('application_health_policy', None) -class ComposeDeploymentUpgradeProgressInfo(msrest.serialization.Model): +class ComposeDeploymentUpgradeProgressInfo(Model): """Describes the parameters for a compose deployment upgrade. :param deployment_name: The name of the target deployment. :type deployment_name: str - :param application_name: The name of the target application, including the 'fabric:' URI - scheme. + :param application_name: The name of the target application, including the + 'fabric:' URI scheme. :type application_name: str - :param upgrade_state: The state of the compose deployment upgrade. Possible values include: - "Invalid", "ProvisioningTarget", "RollingForwardInProgress", "RollingForwardPending", - "UnprovisioningCurrent", "RollingForwardCompleted", "RollingBackInProgress", - "UnprovisioningTarget", "RollingBackCompleted", "Failed". - :type upgrade_state: str or ~azure.servicefabric.models.ComposeDeploymentUpgradeState - :param upgrade_status_details: Additional detailed information about the status of the pending - upgrade. + :param upgrade_state: The state of the compose deployment upgrade. + Possible values include: 'Invalid', 'ProvisioningTarget', + 'RollingForwardInProgress', 'RollingForwardPending', + 'UnprovisioningCurrent', 'RollingForwardCompleted', + 'RollingBackInProgress', 'UnprovisioningTarget', 'RollingBackCompleted', + 'Failed' + :type upgrade_state: str or + ~azure.servicefabric.models.ComposeDeploymentUpgradeState + :param upgrade_status_details: Additional detailed information about the + status of the pending upgrade. :type upgrade_status_details: str - :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . 
:type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. 
(unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :param target_application_type_version: The target application type version (found in the - application manifest) for the application upgrade. + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param target_application_type_version: The target application type + version (found in the application manifest) for the application upgrade. :type target_application_type_version: str - :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param upgrade_duration: The estimated amount of time that the overall + upgrade elapsed. It is first interpreted as a string representing an ISO + 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. :type upgrade_duration: str - :param current_upgrade_domain_duration: The estimated amount of time spent processing current - Upgrade Domain. It is first interpreted as a string representing an ISO 8601 duration. 
If that - fails, then it is interpreted as a number representing the total number of milliseconds. + :param current_upgrade_domain_duration: The estimated amount of time spent + processing current Upgrade Domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type current_upgrade_domain_duration: str - :param application_unhealthy_evaluations: List of health evaluations that resulted in the - current aggregated health state. + :param application_unhealthy_evaluations: List of health evaluations that + resulted in the current aggregated health state. :type application_unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current in-progress upgrade - domain. + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and - FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being - executed. Possible values include: "None", "Interrupted", "HealthCheck", - "UpgradeDomainTimeout", "OverallUpgradeTimeout". + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. 
Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the - time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade + domain progress at the time of upgrade failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param application_upgrade_status_details: Additional details of application upgrade including - failure message. + :param application_upgrade_status_details: Additional details of + application upgrade including failure message. :type application_upgrade_status_details: str """ @@ -7454,10 +6820,7 @@ class ComposeDeploymentUpgradeProgressInfo(msrest.serialization.Model): 'application_upgrade_status_details': {'key': 'ApplicationUpgradeStatusDetails', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ComposeDeploymentUpgradeProgressInfo, self).__init__(**kwargs) self.deployment_name = kwargs.get('deployment_name', None) self.application_name = kwargs.get('application_name', None) @@ -7465,13 +6828,13 @@ def __init__( self.upgrade_status_details = kwargs.get('upgrade_status_details', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.force_restart = kwargs.get('force_restart', False) - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) + self.force_restart = kwargs.get('force_restart', None) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) self.monitoring_policy = kwargs.get('monitoring_policy', None) self.application_health_policy = 
kwargs.get('application_health_policy', None) self.target_application_type_version = kwargs.get('target_application_type_version', None) - self.upgrade_duration = kwargs.get('upgrade_duration', "PT0H2M0S") - self.current_upgrade_domain_duration = kwargs.get('current_upgrade_domain_duration', "PT0H2M0S") + self.upgrade_duration = kwargs.get('upgrade_duration', None) + self.current_upgrade_domain_duration = kwargs.get('current_upgrade_domain_duration', None) self.application_unhealthy_evaluations = kwargs.get('application_unhealthy_evaluations', None) self.current_upgrade_domain_progress = kwargs.get('current_upgrade_domain_progress', None) self.start_timestamp_utc = kwargs.get('start_timestamp_utc', None) @@ -7481,21 +6844,23 @@ def __init__( self.application_upgrade_status_details = kwargs.get('application_upgrade_status_details', None) -class ConfigParameterOverride(msrest.serialization.Model): +class ConfigParameterOverride(Model): """Information about a configuration parameter override. All required parameters must be populated in order to send to Azure. - :param section_name: Required. Name of the section for the parameter override. + :param section_name: Required. Name of the section for the parameter + override. :type section_name: str - :param parameter_name: Required. Name of the parameter that has been overridden. + :param parameter_name: Required. Name of the parameter that has been + overridden. :type parameter_name: str :param parameter_value: Required. Value of the overridden parameter. :type parameter_value: str :param timeout: The duration until config override is considered as valid. - :type timeout: ~datetime.timedelta - :param persist_across_upgrade: A value that indicates whether config override will be removed - on upgrade or will still be considered as valid. + :type timeout: timedelta + :param persist_across_upgrade: A value that indicates whether config + override will be removed on upgrade or will still be considered as valid. 
:type persist_across_upgrade: bool """ @@ -7513,31 +6878,28 @@ class ConfigParameterOverride(msrest.serialization.Model): 'persist_across_upgrade': {'key': 'PersistAcrossUpgrade', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ConfigParameterOverride, self).__init__(**kwargs) - self.section_name = kwargs['section_name'] - self.parameter_name = kwargs['parameter_name'] - self.parameter_value = kwargs['parameter_value'] + self.section_name = kwargs.get('section_name', None) + self.parameter_name = kwargs.get('parameter_name', None) + self.parameter_value = kwargs.get('parameter_value', None) self.timeout = kwargs.get('timeout', None) self.persist_across_upgrade = kwargs.get('persist_across_upgrade', None) -class ContainerApiRequestBody(msrest.serialization.Model): +class ContainerApiRequestBody(Model): """parameters for making container API call. All required parameters must be populated in order to send to Azure. - :param http_verb: HTTP verb of container REST API, defaults to "GET". + :param http_verb: HTTP verb of container REST API, defaults to "GET" :type http_verb: str - :param uri_path: Required. URI path of container REST API. + :param uri_path: Required. URI path of container REST API :type uri_path: str - :param content_type: Content type of container REST API request, defaults to - "application/json". + :param content_type: Content type of container REST API request, defaults + to "application/json" :type content_type: str - :param body: HTTP request body of container REST API. 
+ :param body: HTTP request body of container REST API :type body: str """ @@ -7552,18 +6914,15 @@ class ContainerApiRequestBody(msrest.serialization.Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerApiRequestBody, self).__init__(**kwargs) self.http_verb = kwargs.get('http_verb', None) - self.uri_path = kwargs['uri_path'] + self.uri_path = kwargs.get('uri_path', None) self.content_type = kwargs.get('content_type', None) self.body = kwargs.get('body', None) -class ContainerApiResponse(msrest.serialization.Model): +class ContainerApiResponse(Model): """Response body that wraps container API result. All required parameters must be populated in order to send to Azure. @@ -7580,26 +6939,24 @@ class ContainerApiResponse(msrest.serialization.Model): 'container_api_result': {'key': 'ContainerApiResult', 'type': 'ContainerApiResult'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerApiResponse, self).__init__(**kwargs) - self.container_api_result = kwargs['container_api_result'] + self.container_api_result = kwargs.get('container_api_result', None) -class ContainerApiResult(msrest.serialization.Model): +class ContainerApiResult(Model): """Container API result. All required parameters must be populated in order to send to Azure. - :param status: Required. HTTP status code returned by the target container API. + :param status: Required. HTTP status code returned by the target container + API :type status: int - :param content_type: HTTP content type. + :param content_type: HTTP content type :type content_type: str - :param content_encoding: HTTP content encoding. + :param content_encoding: HTTP content encoding :type content_encoding: str - :param body: container API result body. 
+ :param body: container API result body :type body: str """ @@ -7614,21 +6971,19 @@ class ContainerApiResult(msrest.serialization.Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerApiResult, self).__init__(**kwargs) - self.status = kwargs['status'] + self.status = kwargs.get('status', None) self.content_type = kwargs.get('content_type', None) self.content_encoding = kwargs.get('content_encoding', None) self.body = kwargs.get('body', None) -class ContainerCodePackageProperties(msrest.serialization.Model): +class ContainerCodePackageProperties(Model): """Describes a container and its runtime properties. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. @@ -7637,16 +6992,21 @@ class ContainerCodePackageProperties(msrest.serialization.Model): :param image: Required. The Container image to use. :type image: str :param image_registry_credential: Image registry credential. - :type image_registry_credential: ~azure.servicefabric.models.ImageRegistryCredential + :type image_registry_credential: + ~azure.servicefabric.models.ImageRegistryCredential :param entry_point: Override for the default entry point in the container. :type entry_point: str - :param commands: Command array to execute within the container in exec form. + :param commands: Command array to execute within the container in exec + form. :type commands: list[str] - :param environment_variables: The environment variables to set in this container. - :type environment_variables: list[~azure.servicefabric.models.EnvironmentVariable] - :param settings: The settings to set in this container. The setting file path can be fetched - from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". 
- The path for Linux container is "/var/secrets". + :param environment_variables: The environment variables to set in this + container + :type environment_variables: + list[~azure.servicefabric.models.EnvironmentVariable] + :param settings: The settings to set in this container. The setting file + path can be fetched from environment variable "Fabric_SettingPath". The + path for Windows container is "C:\\\\secrets". The path for Linux + container is "/var/secrets". :type settings: list[~azure.servicefabric.models.Setting] :param labels: The labels to set in this container. :type labels: list[~azure.servicefabric.models.ContainerLabel] @@ -7654,24 +7014,26 @@ class ContainerCodePackageProperties(msrest.serialization.Model): :type endpoints: list[~azure.servicefabric.models.EndpointProperties] :param resources: Required. The resources required by this container. :type resources: ~azure.servicefabric.models.ResourceRequirements - :param volume_refs: Volumes to be attached to the container. The lifetime of these volumes is - independent of the application's lifetime. + :param volume_refs: Volumes to be attached to the container. The lifetime + of these volumes is independent of the application's lifetime. :type volume_refs: list[~azure.servicefabric.models.VolumeReference] - :param volumes: Volumes to be attached to the container. The lifetime of these volumes is - scoped to the application's lifetime. + :param volumes: Volumes to be attached to the container. The lifetime of + these volumes is scoped to the application's lifetime. :type volumes: list[~azure.servicefabric.models.ApplicationScopedVolume] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef - :param reliable_collections_refs: A list of ReliableCollection resources used by this - particular code package. Please refer to ReliableCollectionsRef for more details. 
- :type reliable_collections_refs: list[~azure.servicefabric.models.ReliableCollectionsRef] + :param reliable_collections_refs: A list of ReliableCollection resources + used by this particular code package. Please refer to + ReliableCollectionsRef for more details. + :type reliable_collections_refs: + list[~azure.servicefabric.models.ReliableCollectionsRef] :ivar instance_view: Runtime information of a container instance. :vartype instance_view: ~azure.servicefabric.models.ContainerInstanceView - :param liveness_probe: An array of liveness probes for a code package. It determines when to - restart a code package. + :param liveness_probe: An array of liveness probes for a code package. It + determines when to restart a code package. :type liveness_probe: list[~azure.servicefabric.models.Probe] - :param readiness_probe: An array of readiness probes for a code package. It determines when to - unpublish an endpoint. + :param readiness_probe: An array of readiness probes for a code package. + It determines when to unpublish an endpoint. 
:type readiness_probe: list[~azure.servicefabric.models.Probe] """ @@ -7702,13 +7064,10 @@ class ContainerCodePackageProperties(msrest.serialization.Model): 'readiness_probe': {'key': 'readinessProbe', 'type': '[Probe]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerCodePackageProperties, self).__init__(**kwargs) - self.name = kwargs['name'] - self.image = kwargs['image'] + self.name = kwargs.get('name', None) + self.image = kwargs.get('image', None) self.image_registry_credential = kwargs.get('image_registry_credential', None) self.entry_point = kwargs.get('entry_point', None) self.commands = kwargs.get('commands', None) @@ -7716,7 +7075,7 @@ def __init__( self.settings = kwargs.get('settings', None) self.labels = kwargs.get('labels', None) self.endpoints = kwargs.get('endpoints', None) - self.resources = kwargs['resources'] + self.resources = kwargs.get('resources', None) self.volume_refs = kwargs.get('volume_refs', None) self.volumes = kwargs.get('volumes', None) self.diagnostics = kwargs.get('diagnostics', None) @@ -7726,7 +7085,7 @@ def __init__( self.readiness_probe = kwargs.get('readiness_probe', None) -class ContainerEvent(msrest.serialization.Model): +class ContainerEvent(Model): """A container event. :param name: The name of the container event. @@ -7737,7 +7096,7 @@ class ContainerEvent(msrest.serialization.Model): :type first_timestamp: str :param last_timestamp: Date/time of the last event. :type last_timestamp: str - :param message: The event message. + :param message: The event message :type message: str :param type: The event type. 
:type type: str @@ -7752,10 +7111,7 @@ class ContainerEvent(msrest.serialization.Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerEvent, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.count = kwargs.get('count', None) @@ -7770,66 +7126,44 @@ class ContainerInstanceEvent(FabricEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerInstanceEvent, self).__init__(**kwargs) - self.kind = 'ContainerInstanceEvent' # type: str + self.kind = 'ContainerInstanceEvent' -class ContainerInstanceView(msrest.serialization.Model): +class ContainerInstanceView(Model): """Runtime information of a container instance. - :param restart_count: The number of times the container has been restarted. 
+ :param restart_count: The number of times the container has been + restarted. :type restart_count: int :param current_state: Current container instance state. :type current_state: ~azure.servicefabric.models.ContainerState @@ -7846,10 +7180,7 @@ class ContainerInstanceView(msrest.serialization.Model): 'events': {'key': 'events', 'type': '[ContainerEvent]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerInstanceView, self).__init__(**kwargs) self.restart_count = kwargs.get('restart_count', None) self.current_state = kwargs.get('current_state', None) @@ -7857,7 +7188,7 @@ def __init__( self.events = kwargs.get('events', None) -class ContainerLabel(msrest.serialization.Model): +class ContainerLabel(Model): """Describes a container label. All required parameters must be populated in order to send to Azure. @@ -7878,16 +7209,13 @@ class ContainerLabel(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerLabel, self).__init__(**kwargs) - self.name = kwargs['name'] - self.value = kwargs['value'] + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) -class ContainerLogs(msrest.serialization.Model): +class ContainerLogs(Model): """Container logs. :param content: Container logs. @@ -7898,25 +7226,22 @@ class ContainerLogs(msrest.serialization.Model): 'content': {'key': 'Content', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerLogs, self).__init__(**kwargs) self.content = kwargs.get('content', None) -class ContainerState(msrest.serialization.Model): +class ContainerState(Model): """The container state. - :param state: The state of this container. + :param state: The state of this container :type state: str :param start_time: Date/time when the container state started. 
- :type start_time: ~datetime.datetime + :type start_time: datetime :param exit_code: The container exit code. :type exit_code: str :param finish_time: Date/time when the container state finished. - :type finish_time: ~datetime.datetime + :type finish_time: datetime :param detail_status: Human-readable status of this state. :type detail_status: str """ @@ -7929,10 +7254,7 @@ class ContainerState(msrest.serialization.Model): 'detail_status': {'key': 'detailStatus', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ContainerState, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.start_time = kwargs.get('start_time', None) @@ -7941,17 +7263,18 @@ def __init__( self.detail_status = kwargs.get('detail_status', None) -class CreateComposeDeploymentDescription(msrest.serialization.Model): +class CreateComposeDeploymentDescription(Model): """Defines description for creating a Service Fabric compose deployment. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file that describes the - deployment to create. + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container registry. + :param registry_credential: Credential information to connect to container + registry. 
:type registry_credential: ~azure.servicefabric.models.RegistryCredential """ @@ -7966,23 +7289,22 @@ class CreateComposeDeploymentDescription(msrest.serialization.Model): 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CreateComposeDeploymentDescription, self).__init__(**kwargs) - self.deployment_name = kwargs['deployment_name'] - self.compose_file_content = kwargs['compose_file_content'] + self.deployment_name = kwargs.get('deployment_name', None) + self.compose_file_content = kwargs.get('compose_file_content', None) self.registry_credential = kwargs.get('registry_credential', None) -class CurrentUpgradeDomainProgressInfo(msrest.serialization.Model): +class CurrentUpgradeDomainProgressInfo(Model): """Information about the current in-progress upgrade domain. - :param domain_name: The name of the upgrade domain. + :param domain_name: The name of the upgrade domain :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
- :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -7990,46 +7312,41 @@ class CurrentUpgradeDomainProgressInfo(msrest.serialization.Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(CurrentUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = kwargs.get('domain_name', None) self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) -class DeactivationIntentDescription(msrest.serialization.Model): +class DeactivationIntentDescription(Model): """Describes the intent or reason for deactivating the node. - :param deactivation_intent: Describes the intent or reason for deactivating the node. The - possible values are following. Possible values include: "Pause", "Restart", "RemoveData". - :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent + :param deactivation_intent: Describes the intent or reason for + deactivating the node. The possible values are following. Possible values + include: 'Pause', 'Restart', 'RemoveData' + :type deactivation_intent: str or + ~azure.servicefabric.models.DeactivationIntent """ _attribute_map = { 'deactivation_intent': {'key': 'DeactivationIntent', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeactivationIntentDescription, self).__init__(**kwargs) self.deactivation_intent = kwargs.get('deactivation_intent', None) -class ExecutionPolicy(msrest.serialization.Model): +class ExecutionPolicy(Model): """The execution policy of the service. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy. + sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy All required parameters must be populated in order to send to Azure. - :param type: Required. Enumerates the execution policy types for services.Constant filled by - server. Possible values include: "Default", "RunToCompletion". - :type type: str or ~azure.servicefabric.models.ExecutionPolicyType + :param type: Required. Constant filled by server. + :type type: str """ _validation = { @@ -8044,12 +7361,9 @@ class ExecutionPolicy(msrest.serialization.Model): 'type': {'Default': 'DefaultExecutionPolicy', 'RunToCompletion': 'RunToCompletionExecutionPolicy'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ExecutionPolicy, self).__init__(**kwargs) - self.type = None # type: Optional[str] + self.type = None class DefaultExecutionPolicy(ExecutionPolicy): @@ -8057,9 +7371,8 @@ class DefaultExecutionPolicy(ExecutionPolicy): All required parameters must be populated in order to send to Azure. - :param type: Required. Enumerates the execution policy types for services.Constant filled by - server. Possible values include: "Default", "RunToCompletion". - :type type: str or ~azure.servicefabric.models.ExecutionPolicyType + :param type: Required. Constant filled by server. + :type type: str """ _validation = { @@ -8070,85 +7383,76 @@ class DefaultExecutionPolicy(ExecutionPolicy): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DefaultExecutionPolicy, self).__init__(**kwargs) - self.type = 'Default' # type: str + self.type = 'Default' class DeletePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that deletes a specified property if it exists. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. 
+ """Represents a PropertyBatchOperation that deletes a specified property if it + exists. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeletePropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'Delete' # type: str + self.kind = 'Delete' class DeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. -Can be returned during cluster upgrade when the aggregated health state of the cluster is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for delta nodes, containing health evaluations + for each unhealthy node that impacted current aggregated health state. + Can be returned during cluster upgrade when the aggregated health state of + the cluster is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param baseline_error_count: Number of nodes with aggregated heath state Error in the health - store at the beginning of the cluster upgrade. + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param baseline_error_count: Number of nodes with aggregated heath state + Error in the health store at the beginning of the cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of nodes in the health store at the beginning of the - cluster upgrade. + :param baseline_total_count: Total number of nodes in the health store at + the beginning of the cluster upgrade. :type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of delta unhealthy nodes - from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of + delta unhealthy nodes from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int :param total_count: Total number of nodes in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. - Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. + Includes all the unhealthy NodeHealthEvaluation that impacted the + aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8156,9 +7460,9 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, 'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'}, @@ -8166,44 +7470,45 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeltaNodesCheckHealthEvaluation, self).__init__(**kwargs) - self.kind = 'DeltaNodesCheck' # type: str self.baseline_error_count = kwargs.get('baseline_error_count', None) self.baseline_total_count = kwargs.get('baseline_total_count', None) self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'DeltaNodesCheck' class DeployedApplicationHealth(EntityHealth): - """Information about the health of an application deployed on a Service Fabric node. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Information about the health of an application deployed on a Service Fabric + node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the application deployed on the node whose health information is described - by this object. + :param name: Name of the application deployed on the node whose health + information is described by this object. :type name: str :param node_name: Name of the node where this application is deployed. 
:type node_name: str - :param deployed_service_package_health_states: Deployed service package health states for the - current deployed application as found in the health store. + :param deployed_service_package_health_states: Deployed service package + health states for the current deployed application as found in the health + store. :type deployed_service_package_health_states: list[~azure.servicefabric.models.DeployedServicePackageHealthState] """ @@ -8218,10 +7523,7 @@ class DeployedApplicationHealth(EntityHealth): 'deployed_service_package_health_states': {'key': 'DeployedServicePackageHealthStates', 'type': '[DeployedServicePackageHealthState]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.node_name = kwargs.get('node_name', None) @@ -8229,36 +7531,34 @@ def __init__( class DeployedApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed application, containing information about the data and the algorithm used by the health store to evaluate health. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a deployed application, containing + information about the data and the algorithm used by the health store to + evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Name of the node where the application is deployed to. :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the deployed application. - The types of the unhealthy evaluations can be DeployedServicePackagesHealthEvaluation or - EventHealthEvaluation. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the deployed application. + The types of the unhealthy evaluations can be + DeployedServicePackagesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8266,23 +7566,20 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealthEvaluation, self).__init__(**kwargs) - self.kind = 'DeployedApplication' # type: str self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'DeployedApplication' class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): @@ -8290,44 +7587,25 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -8345,16 +7623,17 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -8369,11 +7648,11 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -8387,34 +7666,36 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'DeployedApplicationHealthReportExpired' # type: str - self.application_instance_id = kwargs['application_instance_id'] - self.node_name = kwargs['node_name'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.application_instance_id = kwargs.get('application_instance_id', None) + self.node_name = kwargs.get('node_name', None) + 
self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedApplicationHealthReportExpired' class DeployedApplicationHealthState(EntityHealthState): - """Represents the health state of a deployed application, which contains the entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is deployed. + """Represents the health state of a deployed application, which contains the + entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is + deployed. :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
:type application_name: str """ @@ -8424,27 +7705,27 @@ class DeployedApplicationHealthState(EntityHealthState): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealthState, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed application, which contains the node where the application is deployed, the aggregated health state and any deployed service packages that respect the chunk query description filters. - - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + """Represents the health state chunk of a deployed application, which contains + the node where the application is deployed, the aggregated health state and + any deployed service packages that respect the chunk query description + filters. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of node where the application is deployed. :type node_name: str - :param deployed_service_package_health_state_chunks: The list of deployed service package - health state chunks belonging to the deployed application that respect the filters in the - cluster health chunk query description. 
+ :param deployed_service_package_health_state_chunks: The list of deployed + service package health state chunks belonging to the deployed application + that respect the filters in the cluster health chunk query description. :type deployed_service_package_health_state_chunks: ~azure.servicefabric.models.DeployedServicePackageHealthStateChunkList """ @@ -8455,81 +7736,93 @@ class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_service_package_health_state_chunks': {'key': 'DeployedServicePackageHealthStateChunks', 'type': 'DeployedServicePackageHealthStateChunkList'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealthStateChunk, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.deployed_service_package_health_state_chunks = kwargs.get('deployed_service_package_health_state_chunks', None) -class DeployedApplicationHealthStateChunkList(msrest.serialization.Model): - """The list of deployed application health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class DeployedApplicationHealthStateChunkList(Model): + """The list of deployed application health state chunks that respect the input + filters in the chunk query. Returned by get cluster health state chunks + query. - :param items: The list of deployed application health state chunks that respect the input - filters in the chunk query. - :type items: list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] + :param items: The list of deployed application health state chunks that + respect the input filters in the chunk query. 
+ :type items: + list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedApplicationHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class DeployedApplicationHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a deployed application should be included as a child of an application in the cluster health chunk. -The deployed applications are only returned if the parent application matches a filter specified in the cluster health chunk query description. -One filter can match zero, one or multiple deployed applications, depending on its properties. - - :param node_name_filter: The name of the node where the application is deployed in order to - match the filter. - If specified, the filter is applied only to the application deployed on the specified node. - If the application is not deployed on the node with the specified name, no deployed - application is returned in the cluster health chunk based on this filter. - Otherwise, the deployed application is included in the cluster health chunk if it respects the - other filter properties. - If not specified, all deployed applications that match the parent filters (if any) are taken - into consideration and matched against the other filter members, like health state filter. +class DeployedApplicationHealthStateFilter(Model): + """Defines matching criteria to determine whether a deployed application + should be included as a child of an application in the cluster health + chunk. + The deployed applications are only returned if the parent application + matches a filter specified in the cluster health chunk query description. + One filter can match zero, one or multiple deployed applications, depending + on its properties. 
+ + :param node_name_filter: The name of the node where the application is + deployed in order to match the filter. + If specified, the filter is applied only to the application deployed on + the specified node. + If the application is not deployed on the node with the specified name, no + deployed application is returned in the cluster health chunk based on this + filter. + Otherwise, the deployed application is included in the cluster health + chunk if it respects the other filter properties. + If not specified, all deployed applications that match the parent filters + (if any) are taken into consideration and matched against the other filter + members, like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the deployed applications. It - allows selecting deployed applications if they match the desired health states. - The possible values are integer value of one of the following health states. Only deployed - applications that match the filter are returned. All deployed applications are used to evaluate - the cluster aggregated health state. - If not specified, default value is None, unless the node name is specified. If the filter has - default value and node name is specified, the matching deployed application is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed applications with HealthState - value of OK (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. 
- * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + deployed applications. It allows selecting deployed applications if they + match the desired health states. + The possible values are integer value of one of the following health + states. Only deployed applications that match the filter are returned. All + deployed applications are used to evaluate the cluster aggregated health + state. + If not specified, default value is None, unless the node name is + specified. If the filter has default value and node name is specified, the + matching deployed application is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed applications + with HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param deployed_service_package_filters: Defines a list of filters that specify which deployed - service packages to be included in the returned cluster health chunk as children of the parent - deployed application. 
The deployed service packages are returned only if the parent deployed + :param deployed_service_package_filters: Defines a list of filters that + specify which deployed service packages to be included in the returned + cluster health chunk as children of the parent deployed application. The + deployed service packages are returned only if the parent deployed application matches a filter. - If the list is empty, no deployed service packages are returned. All the deployed service - packages are used to evaluate the parent deployed application aggregated health state, - regardless of the input filters. - The deployed application filter may specify multiple deployed service package filters. - For example, it can specify a filter to return all deployed service packages with health state - Error and another filter to always include a deployed service package on a node. + If the list is empty, no deployed service packages are returned. All the + deployed service packages are used to evaluate the parent deployed + application aggregated health state, regardless of the input filters. + The deployed application filter may specify multiple deployed service + package filters. + For example, it can specify a filter to return all deployed service + packages with health state Error and another filter to always include a + deployed service package on a node. 
:type deployed_service_package_filters: list[~azure.servicefabric.models.DeployedServicePackageHealthStateFilter] """ @@ -8540,45 +7833,47 @@ class DeployedApplicationHealthStateFilter(msrest.serialization.Model): 'deployed_service_package_filters': {'key': 'DeployedServicePackageFilters', 'type': '[DeployedServicePackageHealthStateFilter]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = kwargs.get('node_name_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) self.deployed_service_package_filters = kwargs.get('deployed_service_package_filters', None) -class DeployedApplicationInfo(msrest.serialization.Model): +class DeployedApplicationInfo(Model): """Information about application deployed on the node. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str - :param type_name: The application type name as defined in the application manifest. 
+ :param type_name: The application type name as defined in the application + manifest. :type type_name: str - :param status: The status of the application deployed on the node. Following are the possible - values. Possible values include: "Invalid", "Downloading", "Activating", "Active", "Upgrading", - "Deactivating". + :param status: The status of the application deployed on the node. + Following are the possible values. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' :type status: str or ~azure.servicefabric.models.DeployedApplicationStatus - :param work_directory: The work directory of the application on the node. The work directory - can be used to store application data. + :param work_directory: The work directory of the application on the node. + The work directory can be used to store application data. :type work_directory: str - :param log_directory: The log directory of the application on the node. The log directory can - be used to store application logs. + :param log_directory: The log directory of the application on the node. + The log directory can be used to store application logs. :type log_directory: str - :param temp_directory: The temp directory of the application on the node. The code packages - belonging to the application are forked with this directory set as their temporary directory. + :param temp_directory: The temp directory of the application on the node. + The code packages belonging to the application are forked with this + directory set as their temporary directory. :type temp_directory: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -8593,10 +7888,7 @@ class DeployedApplicationInfo(msrest.serialization.Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -8613,44 +7905,25 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -8668,16 +7941,17 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -8692,11 +7966,11 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -8710,57 +7984,53 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationNewHealthReportEvent, 
self).__init__(**kwargs) - self.kind = 'DeployedApplicationNewHealthReport' # type: str - self.application_instance_id = kwargs['application_instance_id'] - self.node_name = kwargs['node_name'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.application_instance_id = kwargs.get('application_instance_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedApplicationNewHealthReport' class DeployedApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed applications, containing health evaluations for each unhealthy deployed application that impacted current aggregated health state. -Can be returned when evaluating application health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for deployed applications, containing health + evaluations for each unhealthy deployed application that impacted current + aggregated health state. + Can be returned when evaluating application health and the aggregated + health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_deployed_applications: Maximum allowed percentage of unhealthy - deployed applications from the ApplicationHealthPolicy. + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param max_percent_unhealthy_deployed_applications: Maximum allowed + percentage of unhealthy deployed applications from the + ApplicationHealthPolicy. :type max_percent_unhealthy_deployed_applications: int - :param total_count: Total number of deployed applications of the application in the health - store. + :param total_count: Total number of deployed applications of the + application in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy DeployedApplicationHealthEvaluation that impacted the - aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + DeployedApplicationHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8768,60 +8038,64 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_deployed_applications': {'key': 'MaxPercentUnhealthyDeployedApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedApplicationsHealthEvaluation, self).__init__(**kwargs) - self.kind = 'DeployedApplications' # type: str self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', None) self.total_count = 
kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'DeployedApplications' -class DeployedCodePackageInfo(msrest.serialization.Model): +class DeployedCodePackageInfo(Model): """Information about code package deployed on a Service Fabric node. :param name: The name of the code package. :type name: str - :param version: The version of the code package specified in service manifest. + :param version: The version of the code package specified in service + manifest. :type version: str - :param service_manifest_name: The name of service manifest that specified this code package. + :param service_manifest_name: The name of service manifest that specified + this code package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_type: Specifies the type of host for main entry point of a code package as - specified in service manifest. Possible values include: "Invalid", "ExeHost", "ContainerHost". + :param host_type: Specifies the type of host for main entry point of a + code package as specified in service manifest. 
Possible values include: + 'Invalid', 'ExeHost', 'ContainerHost' :type host_type: str or ~azure.servicefabric.models.HostType - :param host_isolation_mode: Specifies the isolation mode of main entry point of a code package - when it's host type is ContainerHost. This is specified as part of container host policies in - application manifest while importing service manifest. Possible values include: "None", - "Process", "HyperV". - :type host_isolation_mode: str or ~azure.servicefabric.models.HostIsolationMode - :param status: Specifies the status of a deployed application or service package on a Service - Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", - "Upgrading", "Deactivating", "RanToCompletion", "Failed". + :param host_isolation_mode: Specifies the isolation mode of main entry + point of a code package when it's host type is ContainerHost. This is + specified as part of container host policies in application manifest while + importing service manifest. Possible values include: 'None', 'Process', + 'HyperV' + :type host_isolation_mode: str or + ~azure.servicefabric.models.HostIsolationMode + :param status: Specifies the status of a deployed application or service + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', + 'RanToCompletion', 'Failed' :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param run_frequency_interval: The interval at which code package is run. This is used for - periodic code package. + :param run_frequency_interval: The interval at which code package is run. + This is used for periodic code package. :type run_frequency_interval: str - :param setup_entry_point: Information about setup or main entry point of a code package - deployed on a Service Fabric node. + :param setup_entry_point: Information about setup or main entry point of a + code package deployed on a Service Fabric node. 
:type setup_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint - :param main_entry_point: Information about setup or main entry point of a code package deployed - on a Service Fabric node. + :param main_entry_point: Information about setup or main entry point of a + code package deployed on a Service Fabric node. :type main_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint """ @@ -8838,10 +8112,7 @@ class DeployedCodePackageInfo(msrest.serialization.Model): 'main_entry_point': {'key': 'MainEntryPoint', 'type': 'CodePackageEntryPoint'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedCodePackageInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.version = kwargs.get('version', None) @@ -8856,24 +8127,28 @@ def __init__( class DeployedServicePackageHealth(EntityHealth): - """Information about the health of a service package for a specific application deployed on a Service Fabric node. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Information about the health of a service package for a specific + application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: Name of the service manifest. :type service_manifest_name: str @@ -8891,10 +8166,7 @@ class DeployedServicePackageHealth(EntityHealth): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealth, self).__init__(**kwargs) self.application_name = kwargs.get('application_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -8902,36 +8174,36 @@ def __init__( class DeployedServicePackageHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed service package, containing information about the data and the algorithm used by health store to evaluate health. 
The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a deployed service package, containing + information about the data and the algorithm used by health store to + evaluate health. The evaluation is returned only when the aggregated health + state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: The name of the service manifest. :type service_manifest_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state. The type of the unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state. The type of the unhealthy evaluations + can be EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -8939,25 +8211,22 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealthEvaluation, self).__init__(**kwargs) - self.kind = 'DeployedServicePackage' # type: str self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'DeployedServicePackage' class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): @@ -8965,50 +8234,33 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_manifest: Required. Service manifest name. :type service_manifest: str - :param service_package_instance_id: Required. Id of Service package instance. + :param service_package_instance_id: Required. Id of Service package + instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package activation. + :param service_package_activation_id: Required. Id of Service package + activation. :type service_package_activation_id: str :param node_name: Required. 
The name of a Service Fabric node. :type node_name: str @@ -9024,16 +8276,17 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -9050,11 +8303,11 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest': {'key': 'ServiceManifest', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -9070,43 +8323,47 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'DeployedServicePackageHealthReportExpired' # type: str - self.service_manifest = kwargs['service_manifest'] - 
self.service_package_instance_id = kwargs['service_package_instance_id'] - self.service_package_activation_id = kwargs['service_package_activation_id'] - self.node_name = kwargs['node_name'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] - - -class DeployedServicePackageHealthState(EntityHealthState): - """Represents the health state of a deployed service package, containing the entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is deployed. - :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. 
+ self.service_manifest = kwargs.get('service_manifest', None) + self.service_package_instance_id = kwargs.get('service_package_instance_id', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedServicePackageHealthReportExpired' + + +class DeployedServicePackageHealthState(EntityHealthState): + """Represents the health state of a deployed service package, containing the + entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is + deployed. + :type node_name: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_manifest_name: Name of the manifest describing the service package. + :param service_manifest_name: Name of the manifest describing the service + package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. 
If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -9119,10 +8376,7 @@ class DeployedServicePackageHealthState(EntityHealthState): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealthState, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.application_name = kwargs.get('application_name', None) @@ -9131,18 +8385,21 @@ def __init__( class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed service package, which contains the service manifest name and the service package aggregated health state. + """Represents the health state chunk of a deployed service package, which + contains the service manifest name and the service package aggregated + health state. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param service_manifest_name: The name of the service manifest. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -9153,79 +8410,88 @@ class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealthStateChunk, self).__init__(**kwargs) self.service_manifest_name = kwargs.get('service_manifest_name', None) self.service_package_activation_id = kwargs.get('service_package_activation_id', None) -class DeployedServicePackageHealthStateChunkList(msrest.serialization.Model): - """The list of deployed service package health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class DeployedServicePackageHealthStateChunkList(Model): + """The list of deployed service package health state chunks that respect the + input filters in the chunk query. Returned by get cluster health state + chunks query. 
- :param items: The list of deployed service package health state chunks that respect the input - filters in the chunk query. - :type items: list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] + :param items: The list of deployed service package health state chunks + that respect the input filters in the chunk query. + :type items: + list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedServicePackageHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class DeployedServicePackageHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a deployed service package should be included as a child of a deployed application in the cluster health chunk. -The deployed service packages are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent deployed application and its parent application must be included in the cluster health chunk. -One filter can match zero, one or multiple deployed service packages, depending on its properties. - - :param service_manifest_name_filter: The name of the service manifest which identifies the - deployed service packages that matches the filter. - If specified, the filter is applied only to the specified deployed service packages, if any. - If no deployed service packages with specified manifest name exist, nothing is returned in the - cluster health chunk based on this filter. - If any deployed service package exists, they are included in the cluster health chunk if it - respects the other filter properties. 
- If not specified, all deployed service packages that match the parent filters (if any) are - taken into consideration and matched against the other filter members, like health state - filter. +class DeployedServicePackageHealthStateFilter(Model): + """Defines matching criteria to determine whether a deployed service package + should be included as a child of a deployed application in the cluster + health chunk. + The deployed service packages are only returned if the parent entities + match a filter specified in the cluster health chunk query description. The + parent deployed application and its parent application must be included in + the cluster health chunk. + One filter can match zero, one or multiple deployed service packages, + depending on its properties. + + :param service_manifest_name_filter: The name of the service manifest + which identifies the deployed service packages that matches the filter. + If specified, the filter is applied only to the specified deployed service + packages, if any. + If no deployed service packages with specified manifest name exist, + nothing is returned in the cluster health chunk based on this filter. + If any deployed service package exists, they are included in the cluster + health chunk if it respects the other filter properties. + If not specified, all deployed service packages that match the parent + filters (if any) are taken into consideration and matched against the + other filter members, like health state filter. :type service_manifest_name_filter: str - :param service_package_activation_id_filter: The activation ID of a deployed service package - that matches the filter. - If not specified, the filter applies to all deployed service packages that match the other - parameters. - If specified, the filter matches only the deployed service package with the specified - activation ID. + :param service_package_activation_id_filter: The activation ID of a + deployed service package that matches the filter. 
+ If not specified, the filter applies to all deployed service packages that + match the other parameters. + If specified, the filter matches only the deployed service package with + the specified activation ID. :type service_package_activation_id_filter: str - :param health_state_filter: The filter for the health state of the deployed service packages. - It allows selecting deployed service packages if they match the desired health states. - The possible values are integer value of one of the following health states. Only deployed - service packages that match the filter are returned. All deployed service packages are used to - evaluate the parent deployed application aggregated health state. - If not specified, default value is None, unless the deployed service package ID is specified. - If the filter has default value and deployed service package ID is specified, the matching - deployed service package is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed service packages with HealthState - value of OK (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + deployed service packages. It allows selecting deployed service packages + if they match the desired health states. 
+ The possible values are integer value of one of the following health + states. Only deployed service packages that match the filter are returned. + All deployed service packages are used to evaluate the parent deployed + application aggregated health state. + If not specified, default value is None, unless the deployed service + package ID is specified. If the filter has default value and deployed + service package ID is specified, the matching deployed service package is + returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed service + packages with HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . 
:type health_state_filter: int """ @@ -9235,31 +8501,32 @@ class DeployedServicePackageHealthStateFilter(msrest.serialization.Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageHealthStateFilter, self).__init__(**kwargs) self.service_manifest_name_filter = kwargs.get('service_manifest_name_filter', None) self.service_package_activation_id_filter = kwargs.get('service_package_activation_id_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) -class DeployedServicePackageInfo(msrest.serialization.Model): +class DeployedServicePackageInfo(Model): """Information about service package deployed on a Service Fabric node. - :param name: The name of the service package as specified in the service manifest. + :param name: The name of the service package as specified in the service + manifest. :type name: str - :param version: The version of the service package specified in service manifest. + :param version: The version of the service package specified in service + manifest. :type version: str - :param status: Specifies the status of a deployed application or service package on a Service - Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", - "Upgrading", "Deactivating", "RanToCompletion", "Failed". + :param status: Specifies the status of a deployed application or service + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', + 'RanToCompletion', 'Failed' :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param service_package_activation_id: The ActivationId of a deployed service package. 
If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -9271,10 +8538,7 @@ class DeployedServicePackageInfo(msrest.serialization.Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.version = kwargs.get('version', None) @@ -9287,50 +8551,33 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_manifest_name: Required. Service manifest name. :type service_manifest_name: str - :param service_package_instance_id: Required. Id of Service package instance. + :param service_package_instance_id: Required. Id of Service package + instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package activation. + :param service_package_activation_id: Required. Id of Service package + activation. :type service_package_activation_id: str :param node_name: Required. 
The name of a Service Fabric node. :type node_name: str @@ -9346,16 +8593,17 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest_name': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -9372,11 +8620,11 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -9392,55 +8640,52 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackageNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'DeployedServicePackageNewHealthReport' # type: str - self.service_manifest_name = kwargs['service_manifest_name'] - 
self.service_package_instance_id = kwargs['service_package_instance_id'] - self.service_package_activation_id = kwargs['service_package_activation_id'] - self.node_name = kwargs['node_name'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_package_instance_id = kwargs.get('service_package_instance_id', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedServicePackageNewHealthReport' class DeployedServicePackagesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed service packages, containing health evaluations for each unhealthy deployed service package that impacted current aggregated health state. Can be returned when evaluating deployed application health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. 
This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for deployed service packages, containing + health evaluations for each unhealthy deployed service package that + impacted current aggregated health state. Can be returned when evaluating + deployed application health and the aggregated health state is either Error + or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. 
:type description: str - :param total_count: Total number of deployed service packages of the deployed application in - the health store. + :param kind: Required. Constant filled by server. + :type kind: str + :param total_count: Total number of deployed service packages of the + deployed application in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy DeployedServicePackageHealthEvaluation that impacted the - aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + DeployedServicePackageHealthEvaluation that impacted the aggregated + health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -9448,51 +8693,52 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServicePackagesHealthEvaluation, self).__init__(**kwargs) - self.kind = 'DeployedServicePackages' # type: str self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'DeployedServicePackages' -class DeployedServiceReplicaDetailInfo(msrest.serialization.Model): +class DeployedServiceReplicaDetailInfo(Model): """Information about a Service Fabric service replica deployed on a node. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeployedStatefulServiceReplicaDetailInfo, DeployedStatelessServiceInstanceDetailInfo. + sub-classes are: DeployedStatefulServiceReplicaDetailInfo, + DeployedStatelessServiceInstanceDetailInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: Full hierarchical name of the service in URI format starting with - ``fabric:``. + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle operation on a - stateful service replica or stateless service instance. Possible values include: "Unknown", - "None", "Open", "ChangeRole", "Close", "Abort". - :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the current service - operation in UTC format. 
- :type current_service_operation_start_time_utc: ~datetime.datetime + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. + Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime :param reported_load: List of load reported by replica. - :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -9500,71 +8746,75 @@ class DeployedServiceReplicaDetailInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaDetailInfo', 'Stateless': 'DeployedStatelessServiceInstanceDetailInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServiceReplicaDetailInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) self.current_service_operation = 
kwargs.get('current_service_operation', None) self.current_service_operation_start_time_utc = kwargs.get('current_service_operation_start_time_utc', None) self.reported_load = kwargs.get('reported_load', None) + self.service_kind = None -class DeployedServiceReplicaInfo(msrest.serialization.Model): +class DeployedServiceReplicaInfo(Model): """Information about a Service Fabric service replica deployed on a node. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeployedStatefulServiceReplicaInfo, DeployedStatelessServiceInstanceInfo. + sub-classes are: DeployedStatefulServiceReplicaInfo, + DeployedStatelessServiceInstanceInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this replica. + :param code_package_name: The name of the code package that hosts this + replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. 
The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or ChangeRole. + :param address: The last address returned by the replica in Open or + ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the replica. This will - be zero if the replica is down. In hyper-v containers this host process ID will be from - different kernel. 
+ :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. :type host_process_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -9572,7 +8822,6 @@ class DeployedServiceReplicaInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9582,18 +8831,15 @@ class DeployedServiceReplicaInfo(msrest.serialization.Model): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServiceReplicaInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.service_name = kwargs.get('service_name', None) self.service_type_name = kwargs.get('service_type_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -9603,25 +8849,31 @@ def __init__( self.address = kwargs.get('address', None) self.service_package_activation_id = kwargs.get('service_package_activation_id', None) self.host_process_id = kwargs.get('host_process_id', None) + self.service_kind = None -class DeployedServiceTypeInfo(msrest.serialization.Model): - """Information about service type deployed on a node, information such as the status of the service type registration on a node. 
+class DeployedServiceTypeInfo(Model): + """Information about service type deployed on a node, information such as the + status of the service type registration on a node. - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that registered the service type. + :param code_package_name: The name of the code package that registered the + service type. :type code_package_name: str - :param status: The status of the service type registration on the node. Possible values - include: "Invalid", "Disabled", "Enabled", "Registered". - :type status: str or ~azure.servicefabric.models.ServiceTypeRegistrationStatus - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param status: The status of the service type registration on the node. + Possible values include: 'Invalid', 'Disabled', 'Enabled', 'Registered' + :type status: str or + ~azure.servicefabric.models.ServiceTypeRegistrationStatus + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. 
:type service_package_activation_id: str """ @@ -9634,10 +8886,7 @@ class DeployedServiceTypeInfo(msrest.serialization.Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedServiceTypeInfo, self).__init__(**kwargs) self.service_type_name = kwargs.get('service_type_name', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -9647,54 +8896,71 @@ def __init__( class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateful replica running in a code package. Note DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and replicaId. + """Information about a stateful replica running in a code package. Note + DeployedServiceReplicaQueryResult will contain duplicate data like + ServiceKind, ServiceName, PartitionId and replicaId. All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: Full hierarchical name of the service in URI format starting with - ``fabric:``. + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. 
This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle operation on a - stateful service replica or stateless service instance. Possible values include: "Unknown", - "None", "Open", "ChangeRole", "Close", "Abort". - :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the current service - operation in UTC format. - :type current_service_operation_start_time_utc: ~datetime.datetime + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. + Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime :param reported_load: List of load reported by replica. - :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. 
Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str - :param current_replicator_operation: Specifies the operation currently being executed by the - Replicator. Possible values include: "Invalid", "None", "Open", "ChangeRole", "UpdateEpoch", - "Close", "Abort", "OnDataLoss", "WaitForCatchup", "Build". - :type current_replicator_operation: str or ~azure.servicefabric.models.ReplicatorOperationName - :param read_status: Specifies the access status of the partition. Possible values include: - "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". - :type read_status: str or ~azure.servicefabric.models.PartitionAccessStatus - :param write_status: Specifies the access status of the partition. Possible values include: - "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". - :type write_status: str or ~azure.servicefabric.models.PartitionAccessStatus - :param replicator_status: Represents a base class for primary or secondary replicator status. - Contains information about the service fabric replicator like the replication/copy queue - utilization, last acknowledgement received timestamp, etc. + :param current_replicator_operation: Specifies the operation currently + being executed by the Replicator. 
Possible values include: 'Invalid', + 'None', 'Open', 'ChangeRole', 'UpdateEpoch', 'Close', 'Abort', + 'OnDataLoss', 'WaitForCatchup', 'Build' + :type current_replicator_operation: str or + ~azure.servicefabric.models.ReplicatorOperationName + :param read_status: Specifies the access status of the partition. Possible + values include: 'Invalid', 'Granted', 'ReconfigurationPending', + 'NotPrimary', 'NoWriteQuorum' + :type read_status: str or + ~azure.servicefabric.models.PartitionAccessStatus + :param write_status: Specifies the access status of the partition. + Possible values include: 'Invalid', 'Granted', 'ReconfigurationPending', + 'NotPrimary', 'NoWriteQuorum' + :type write_status: str or + ~azure.servicefabric.models.PartitionAccessStatus + :param replicator_status: Represents a base class for primary or secondary + replicator status. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. :type replicator_status: ~azure.servicefabric.models.ReplicatorStatus - :param replica_status: Key value store related information for the replica. - :type replica_status: ~azure.servicefabric.models.KeyValueStoreReplicaStatus - :param deployed_service_replica_query_result: Information about a stateful service replica - deployed on a node. + :param replica_status: Key value store related information for the + replica. + :type replica_status: + ~azure.servicefabric.models.KeyValueStoreReplicaStatus + :param deployed_service_replica_query_result: Information about a stateful + service replica deployed on a node. 
:type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatefulServiceReplicaInfo """ @@ -9704,12 +8970,12 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'current_replicator_operation': {'key': 'CurrentReplicatorOperation', 'type': 'str'}, 'read_status': {'key': 'ReadStatus', 'type': 'str'}, @@ -9719,12 +8985,8 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatefulServiceReplicaInfo'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) self.current_replicator_operation = kwargs.get('current_replicator_operation', None) self.read_status = kwargs.get('read_status', None) @@ -9732,6 +8994,7 @@ def __init__( self.replicator_status = kwargs.get('replicator_status', None) self.replica_status = kwargs.get('replica_status', None) self.deployed_service_replica_query_result = kwargs.get('deployed_service_replica_query_result', None) + self.service_kind = 'Stateful' class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): @@ -9739,50 +9002,61 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): All 
required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this replica. + :param code_package_name: The name of the code package that hosts this + replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". 
+ :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or ChangeRole. + :param address: The last address returned by the replica in Open or + ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the replica. This will - be zero if the replica is down. In hyper-v containers this host process ID will be from - different kernel. + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. :type host_process_id: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. 
+ :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str - :param replica_role: The role of a replica of a stateful service. Possible values include: - "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :param replica_role: The role of a replica of a stateful service. Possible + values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', + 'ActiveSecondary' :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_information: Information about current reconfiguration like phase, type, - previous configuration role of replica and reconfiguration start date time. - :type reconfiguration_information: ~azure.servicefabric.models.ReconfigurationInformation + :param reconfiguration_information: Information about current + reconfiguration like phase, type, previous configuration role of replica + and reconfiguration start date time. 
+ :type reconfiguration_information: + ~azure.servicefabric.models.ReconfigurationInformation """ _validation = { @@ -9790,7 +9064,6 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9800,54 +9073,58 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'reconfiguration_information': {'key': 'ReconfigurationInformation', 'type': 'ReconfigurationInformation'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedStatefulServiceReplicaInfo, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) self.replica_role = kwargs.get('replica_role', None) self.reconfiguration_information = kwargs.get('reconfiguration_information', None) + self.service_kind = 'Stateful' class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateless instance running in a code package. Note that DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and InstanceId. + """Information about a stateless instance running in a code package. Note that + DeployedServiceReplicaQueryResult will contain duplicate data like + ServiceKind, ServiceName, PartitionId and InstanceId. All required parameters must be populated in order to send to Azure. 
- :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: Full hierarchical name of the service in URI format starting with - ``fabric:``. + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle operation on a - stateful service replica or stateless service instance. Possible values include: "Unknown", - "None", "Open", "ChangeRole", "Close", "Abort". - :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the current service - operation in UTC format. - :type current_service_operation_start_time_utc: ~datetime.datetime + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. 
+ Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime :param reported_load: List of load reported by replica. - :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. :type instance_id: str - :param deployed_service_replica_query_result: Information about a stateless service instance - deployed on a node. + :param deployed_service_replica_query_result: Information about a + stateless service instance deployed on a node. 
:type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatelessServiceInstanceInfo """ @@ -9857,24 +9134,21 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatelessServiceInstanceInfo'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) self.deployed_service_replica_query_result = kwargs.get('deployed_service_replica_query_result', None) + self.service_kind = 'Stateless' class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): @@ -9882,42 +9156,49 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param service_name: The full name of the service with 'fabric:' URI + scheme. 
:type service_name: str - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this replica. + :param code_package_name: The name of the code package that hosts this + replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or ChangeRole. + :param address: The last address returned by the replica in Open or + ChangeRole. 
:type address: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the replica. This will - be zero if the replica is down. In hyper-v containers this host process ID will be from - different kernel. + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. :type host_process_id: str - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. 
:type instance_id: str """ @@ -9926,7 +9207,6 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -9936,36 +9216,36 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployedStatelessServiceInstanceInfo, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) + self.service_kind = 'Stateless' -class DeployServicePackageToNodeDescription(msrest.serialization.Model): - """Defines description for downloading packages associated with a service manifest to image cache on a Service Fabric node. +class DeployServicePackageToNodeDescription(Model): + """Defines description for downloading packages associated with a service + manifest to image cache on a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest whose packages need to be - downloaded. + :param service_manifest_name: Required. The name of service manifest whose + packages need to be downloaded. :type service_manifest_name: str - :param application_type_name: Required. The application type name as defined in the application - manifest. + :param application_type_name: Required. The application type name as + defined in the application manifest. 
:type application_type_name: str - :param application_type_version: Required. The version of the application type as defined in - the application manifest. + :param application_type_version: Required. The version of the application + type as defined in the application manifest. :type application_type_version: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param package_sharing_policy: List of package sharing policy information. - :type package_sharing_policy: list[~azure.servicefabric.models.PackageSharingPolicyInfo] + :type package_sharing_policy: + list[~azure.servicefabric.models.PackageSharingPolicyInfo] """ _validation = { @@ -9983,27 +9263,24 @@ class DeployServicePackageToNodeDescription(msrest.serialization.Model): 'package_sharing_policy': {'key': 'PackageSharingPolicy', 'type': '[PackageSharingPolicyInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DeployServicePackageToNodeDescription, self).__init__(**kwargs) - self.service_manifest_name = kwargs['service_manifest_name'] - self.application_type_name = kwargs['application_type_name'] - self.application_type_version = kwargs['application_type_version'] - self.node_name = kwargs['node_name'] + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.node_name = kwargs.get('node_name', None) self.package_sharing_policy = kwargs.get('package_sharing_policy', None) -class DiagnosticsDescription(msrest.serialization.Model): +class DiagnosticsDescription(Model): """Describes the diagnostics options available. :param sinks: List of supported sinks that can be referenced. :type sinks: list[~azure.servicefabric.models.DiagnosticsSinkProperties] :param enabled: Status of whether or not sinks are enabled. 
:type enabled: bool - :param default_sink_refs: The sinks to be used if diagnostics is enabled. Sink choices can be - overridden at the service and code package level. + :param default_sink_refs: The sinks to be used if diagnostics is enabled. + Sink choices can be overridden at the service and code package level. :type default_sink_refs: list[str] """ @@ -10013,23 +9290,20 @@ class DiagnosticsDescription(msrest.serialization.Model): 'default_sink_refs': {'key': 'defaultSinkRefs', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DiagnosticsDescription, self).__init__(**kwargs) self.sinks = kwargs.get('sinks', None) self.enabled = kwargs.get('enabled', None) self.default_sink_refs = kwargs.get('default_sink_refs', None) -class DiagnosticsRef(msrest.serialization.Model): +class DiagnosticsRef(Model): """Reference to sinks in DiagnosticsDescription. :param enabled: Status of whether or not sinks are enabled. :type enabled: bool - :param sink_refs: List of sinks to be used if enabled. References the list of sinks in - DiagnosticsDescription. + :param sink_refs: List of sinks to be used if enabled. References the list + of sinks in DiagnosticsDescription. :type sink_refs: list[str] """ @@ -10038,23 +9312,21 @@ class DiagnosticsRef(msrest.serialization.Model): 'sink_refs': {'key': 'sinkRefs', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DiagnosticsRef, self).__init__(**kwargs) self.enabled = kwargs.get('enabled', None) self.sink_refs = kwargs.get('sink_refs', None) -class DisableBackupDescription(msrest.serialization.Model): - """It describes the body parameters while disabling backup of a backup entity(Application/Service/Partition). +class DisableBackupDescription(Model): + """It describes the body parameters while disabling backup of a backup + entity(Application/Service/Partition). All required parameters must be populated in order to send to Azure. 
- :param clean_backup: Required. Boolean flag to delete backups. It can be set to true for - deleting all the backups which were created for the backup entity that is getting disabled for - backup. + :param clean_backup: Required. Boolean flag to delete backups. It can be + set to true for deleting all the backups which were created for the backup + entity that is getting disabled for backup. :type clean_backup: bool """ @@ -10066,20 +9338,17 @@ class DisableBackupDescription(msrest.serialization.Model): 'clean_backup': {'key': 'CleanBackup', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DisableBackupDescription, self).__init__(**kwargs) - self.clean_backup = kwargs['clean_backup'] + self.clean_backup = kwargs.get('clean_backup', None) -class DiskInfo(msrest.serialization.Model): +class DiskInfo(Model): """Information about the disk. - :param capacity: the disk size in bytes. + :param capacity: the disk size in bytes :type capacity: str - :param available_space: the available disk space in bytes. + :param available_space: the available disk space in bytes :type available_space: str """ @@ -10088,10 +9357,7 @@ class DiskInfo(msrest.serialization.Model): 'available_space': {'key': 'AvailableSpace', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DiskInfo, self).__init__(**kwargs) self.capacity = kwargs.get('capacity', None) self.available_space = kwargs.get('available_space', None) @@ -10102,10 +9368,8 @@ class DoublePropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. 
+ :type kind: str :param data: Required. The data of the property value. :type data: float """ @@ -10120,31 +9384,27 @@ class DoublePropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DoublePropertyValue, self).__init__(**kwargs) - self.kind = 'Double' # type: str - self.data = kwargs['data'] + self.data = kwargs.get('data', None) + self.kind = 'Double' class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Dsms Azure blob store used for storing and enumerating backups. + """Describes the parameters for Dsms Azure blob store used for storing and + enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_credentials_source_location: Required. The source location of the storage - credentials to connect to the Dsms Azure blob store. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param storage_credentials_source_location: Required. The source location + of the storage credentials to connect to the Dsms Azure blob store. :type storage_credentials_source_location: str - :param container_name: Required. The name of the container in the blob store to store and - enumerate backups from. + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. 
:type container_name: str """ @@ -10155,29 +9415,26 @@ class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'storage_credentials_source_location': {'key': 'StorageCredentialsSourceLocation', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(DsmsAzureBlobBackupStorageDescription, self).__init__(**kwargs) - self.storage_kind = 'DsmsAzureBlobStore' # type: str - self.storage_credentials_source_location = kwargs['storage_credentials_source_location'] - self.container_name = kwargs['container_name'] + self.storage_credentials_source_location = kwargs.get('storage_credentials_source_location', None) + self.container_name = kwargs.get('container_name', None) + self.storage_kind = 'DsmsAzureBlobStore' -class EnableBackupDescription(msrest.serialization.Model): +class EnableBackupDescription(Model): """Specifies the parameters needed to enable periodic backup. All required parameters must be populated in order to send to Azure. - :param backup_policy_name: Required. Name of the backup policy to be used for enabling periodic - backups. + :param backup_policy_name: Required. Name of the backup policy to be used + for enabling periodic backups. 
:type backup_policy_name: str """ @@ -10189,15 +9446,12 @@ class EnableBackupDescription(msrest.serialization.Model): 'backup_policy_name': {'key': 'BackupPolicyName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EnableBackupDescription, self).__init__(**kwargs) - self.backup_policy_name = kwargs['backup_policy_name'] + self.backup_policy_name = kwargs.get('backup_policy_name', None) -class EndpointProperties(msrest.serialization.Model): +class EndpointProperties(Model): """Describes a container endpoint. All required parameters must be populated in order to send to Azure. @@ -10217,16 +9471,13 @@ class EndpointProperties(msrest.serialization.Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EndpointProperties, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.port = kwargs.get('port', None) -class EndpointRef(msrest.serialization.Model): +class EndpointRef(Model): """Describes a reference to a service endpoint. :param name: Name of the endpoint. @@ -10237,28 +9488,23 @@ class EndpointRef(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EndpointRef, self).__init__(**kwargs) self.name = kwargs.get('name', None) -class SafetyCheck(msrest.serialization.Model): - """Represents a safety check performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. +class SafetyCheck(Model): + """Represents a safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service + and the reliability of the state. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SeedNodeSafetyCheck, PartitionSafetyCheck. 
+ sub-classes are: PartitionSafetyCheck, SeedNodeSafetyCheck All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -10270,32 +9516,30 @@ class SafetyCheck(msrest.serialization.Model): } _subtype_map = { - 'kind': {'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck', 'PartitionSafetyCheck': 'PartitionSafetyCheck'} + 'kind': {'PartitionSafetyCheck': 'PartitionSafetyCheck', 'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SafetyCheck, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class PartitionSafetyCheck(SafetyCheck): - """Represents a safety check for the service partition being performed by service fabric before continuing with operations. + """Represents a safety check for the service partition being performed by + service fabric before continuing with operations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: EnsureAvailabilitySafetyCheck, EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, WaitForReconfigurationSafetyCheck. 
+ sub-classes are: EnsureAvailabilitySafetyCheck, + EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, + WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, + WaitForReconfigurationSafetyCheck All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -10312,27 +9556,23 @@ class PartitionSafetyCheck(SafetyCheck): 'kind': {'EnsureAvailability': 'EnsureAvailabilitySafetyCheck', 'EnsurePartitionQuorum': 'EnsurePartitionQuorumSafetyCheck', 'WaitForInbuildReplica': 'WaitForInbuildReplicaSafetyCheck', 'WaitForPrimaryPlacement': 'WaitForPrimaryPlacementSafetyCheck', 'WaitForPrimarySwap': 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfiguration': 'WaitForReconfigurationSafetyCheck'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionSafetyCheck, self).__init__(**kwargs) - self.kind = 'PartitionSafetyCheck' # type: str self.partition_id = kwargs.get('partition_id', None) + self.kind = 'PartitionSafetyCheck' class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): - """Safety check that waits to ensure the availability of the partition. 
It waits until there are replicas available such that bringing down this replica will not cause availability loss for the partition. + """Safety check that waits to ensure the availability of the partition. It + waits until there are replicas available such that bringing down this + replica will not cause availability loss for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -10345,26 +9585,21 @@ class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EnsureAvailabilitySafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsureAvailability' # type: str + self.kind = 'EnsureAvailability' class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): - """Safety check that ensures that a quorum of replicas are not lost for a partition. + """Safety check that ensures that a quorum of replicas are not lost for a + partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -10377,22 +9612,21 @@ class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EnsurePartitionQuorumSafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsurePartitionQuorum' # type: str + self.kind = 'EnsurePartitionQuorum' -class EntityKindHealthStateCount(msrest.serialization.Model): +class EntityKindHealthStateCount(Model): """Represents health state count for entities of the specified entity kind. - :param entity_kind: The entity kind for which health states are evaluated. Possible values - include: "Invalid", "Node", "Partition", "Service", "Application", "Replica", - "DeployedApplication", "DeployedServicePackage", "Cluster". + :param entity_kind: The entity kind for which health states are evaluated. + Possible values include: 'Invalid', 'Node', 'Partition', 'Service', + 'Application', 'Replica', 'DeployedApplication', 'DeployedServicePackage', + 'Cluster' :type entity_kind: str or ~azure.servicefabric.models.EntityKind - :param health_state_count: The health state count for the entities of the specified kind. 
+ :param health_state_count: The health state count for the entities of the + specified kind. :type health_state_count: ~azure.servicefabric.models.HealthStateCount """ @@ -10401,25 +9635,23 @@ class EntityKindHealthStateCount(msrest.serialization.Model): 'health_state_count': {'key': 'HealthStateCount', 'type': 'HealthStateCount'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EntityKindHealthStateCount, self).__init__(**kwargs) self.entity_kind = kwargs.get('entity_kind', None) self.health_state_count = kwargs.get('health_state_count', None) -class EnvironmentVariable(msrest.serialization.Model): +class EnvironmentVariable(Model): """Describes an environment variable for the container. - :param type: The type of the environment variable being given in value. Possible values - include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". + :param type: The type of the environment variable being given in value. + Possible values include: 'ClearText', 'KeyVaultReference', + 'SecretValueReference'. Default value: "ClearText" . :type type: str or ~azure.servicefabric.models.EnvironmentVariableType :param name: The name of the environment variable. :type name: str - :param value: The value of the environment variable, will be processed based on the type - provided. + :param value: The value of the environment variable, will be processed + based on the type provided. :type value: str """ @@ -10429,26 +9661,28 @@ class EnvironmentVariable(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EnvironmentVariable, self).__init__(**kwargs) self.type = kwargs.get('type', "ClearText") self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) -class Epoch(msrest.serialization.Model): - """An Epoch is a configuration number for the partition as a whole. 
When the configuration of the replica set changes, for example when the Primary replica changes, the operations that are replicated from the new Primary replica are said to be a new Epoch from the ones which were sent by the old Primary replica. +class Epoch(Model): + """An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary + replica changes, the operations that are replicated from the new Primary + replica are said to be a new Epoch from the ones which were sent by the old + Primary replica. - :param configuration_version: The current configuration number of this Epoch. The configuration - number is an increasing value that is updated whenever the configuration of this replica set - changes. + :param configuration_version: The current configuration number of this + Epoch. The configuration number is an increasing value that is updated + whenever the configuration of this replica set changes. :type configuration_version: str - :param data_loss_version: The current data loss number of this Epoch. The data loss number - property is an increasing value which is updated whenever data loss is suspected, as when loss - of a quorum of replicas in the replica set that includes the Primary replica. + :param data_loss_version: The current data loss number of this Epoch. The + data loss number property is an increasing value which is updated whenever + data loss is suspected, as when loss of a quorum of replicas in the + replica set that includes the Primary replica. 
:type data_loss_version: str """ @@ -10457,43 +9691,38 @@ class Epoch(msrest.serialization.Model): 'data_loss_version': {'key': 'DataLossVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(Epoch, self).__init__(**kwargs) self.configuration_version = kwargs.get('configuration_version', None) self.data_loss_version = kwargs.get('data_loss_version', None) class EventHealthEvaluation(HealthEvaluation): - """Represents health evaluation of a HealthEvent that was reported on the entity. -The health evaluation is returned when evaluating health of an entity results in Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. 
+ """Represents health evaluation of a HealthEvent that was reported on the + entity. + The health evaluation is returned when evaluating health of an entity + results in Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param consider_warning_as_error: Indicates whether warnings are treated with the same severity - as errors. The field is specified in the health policy used to evaluate the entity. + :param kind: Required. Constant filled by server. + :type kind: str + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. The field is specified in the health + policy used to evaluate the entity. :type consider_warning_as_error: bool - :param unhealthy_event: Represents health information reported on a health entity, such as - cluster, application or node, with additional metadata added by the Health Manager. + :param unhealthy_event: Represents health information reported on a health + entity, such as cluster, application or node, with additional metadata + added by the Health Manager. 
:type unhealthy_event: ~azure.servicefabric.models.HealthEvent """ @@ -10502,158 +9731,152 @@ class EventHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, 'unhealthy_event': {'key': 'UnhealthyEvent', 'type': 'HealthEvent'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(EventHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Event' # type: str self.consider_warning_as_error = kwargs.get('consider_warning_as_error', None) self.unhealthy_event = kwargs.get('unhealthy_event', None) + self.kind = 'Event' class ExecutingFaultsChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings. + """Describes a Chaos event that gets generated when Chaos has decided on the + faults for an iteration. This Chaos event contains the details of the + faults as a list of strings. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param faults: List of string description of the faults that Chaos decided to execute in an - iteration. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. 
Constant filled by server. + :type kind: str + :param faults: List of string description of the faults that Chaos decided + to execute in an iteration. :type faults: list[str] """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'faults': {'key': 'Faults', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ExecutingFaultsChaosEvent, self).__init__(**kwargs) - self.kind = 'ExecutingFaults' # type: str self.faults = kwargs.get('faults', None) + self.kind = 'ExecutingFaults' -class ProvisionApplicationTypeDescriptionBase(msrest.serialization.Model): - """Represents the type of registration or provision requested, and if the operation needs to be asynchronous or not. Supported types of provision operations are from either image store or external store. +class ProvisionApplicationTypeDescriptionBase(Model): + """Represents the type of registration or provision requested, and if the + operation needs to be asynchronous or not. Supported types of provision + operations are from either image store or external store. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ExternalStoreProvisionApplicationTypeDescription, ProvisionApplicationTypeDescription. + sub-classes are: ProvisionApplicationTypeDescription, + ExternalStoreProvisionApplicationTypeDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of application type registration or provision requested. The - application package can be registered or provisioned either from the image store or from an - external store. Following are the kinds of the application type provision.Constant filled by - server. 
Possible values include: "Invalid", "ImageStorePath", "ExternalStore". - :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind - :param async_property: Required. Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the request is accepted - by the system, and the provision operation continues without any timeout limit. The default - value is false. For large application packages, we recommend setting the value to true. + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'async_property': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription', 'ImageStorePath': 'ProvisionApplicationTypeDescription'} + 'kind': {'ImageStorePath': 'ProvisionApplicationTypeDescription', 'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProvisionApplicationTypeDescriptionBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] - self.async_property = kwargs['async_property'] + self.async_property = kwargs.get('async_property', None) + self.kind = None class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to 
register or provision an application type using an application package from an external store instead of a package uploaded to the Service Fabric image store. + """Describes the operation to register or provision an application type using + an application package from an external store instead of a package uploaded + to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of application type registration or provision requested. The - application package can be registered or provisioned either from the image store or from an - external store. Following are the kinds of the application type provision.Constant filled by - server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". - :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind - :param async_property: Required. Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the request is accepted - by the system, and the provision operation continues without any timeout limit. The default - value is false. For large application packages, we recommend setting the value to true. + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool - :param application_package_download_uri: Required. The path to the '.sfpkg' application package - from where the application package can be downloaded using HTTP or HTTPS protocols. The - application package can be stored in an external store that provides GET operation to download - the file. 
Supported protocols are HTTP and HTTPS, and the path must allow READ access. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_package_download_uri: Required. The path to the + '.sfpkg' application package from where the application package can be + downloaded using HTTP or HTTPS protocols. The application package can be + stored in an external store that provides GET operation to download the + file. Supported protocols are HTTP and HTTPS, and the path must allow READ + access. :type application_package_download_uri: str - :param application_type_name: Required. The application type name represents the name of the - application type found in the application manifest. + :param application_type_name: Required. The application type name + represents the name of the application type found in the application + manifest. :type application_type_name: str - :param application_type_version: Required. The application type version represents the version - of the application type found in the application manifest. + :param application_type_version: Required. The application type version + represents the version of the application type found in the application + manifest. 
:type application_type_version: str """ _validation = { - 'kind': {'required': True}, 'async_property': {'required': True}, + 'kind': {'required': True}, 'application_package_download_uri': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_package_download_uri': {'key': 'ApplicationPackageDownloadUri', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(**kwargs) - self.kind = 'ExternalStore' # type: str - self.application_package_download_uri = kwargs['application_package_download_uri'] - self.application_type_name = kwargs['application_type_name'] - self.application_type_version = kwargs['application_type_version'] + self.application_package_download_uri = kwargs.get('application_package_download_uri', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.kind = 'ExternalStore' -class FabricCodeVersionInfo(msrest.serialization.Model): +class FabricCodeVersionInfo(Model): """Information about a Service Fabric code version. :param code_version: The product version of Service Fabric. 
@@ -10664,15 +9887,12 @@ class FabricCodeVersionInfo(msrest.serialization.Model): 'code_version': {'key': 'CodeVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FabricCodeVersionInfo, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) -class FabricConfigVersionInfo(msrest.serialization.Model): +class FabricConfigVersionInfo(Model): """Information about a Service Fabric config version. :param config_version: The config version of Service Fabric. @@ -10683,20 +9903,20 @@ class FabricConfigVersionInfo(msrest.serialization.Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FabricConfigVersionInfo, self).__init__(**kwargs) self.config_version = kwargs.get('config_version', None) -class FabricError(msrest.serialization.Model): - """The REST API operations for Service Fabric return standard HTTP status codes. This type defines the additional information returned from the Service Fabric API operations that are not successful. +class FabricError(Model): + """The REST API operations for Service Fabric return standard HTTP status + codes. This type defines the additional information returned from the + Service Fabric API operations that are not successful. All required parameters must be populated in order to send to Azure. - :param error: Required. Error object containing error code and error message. + :param error: Required. Error object containing error code and error + message. 
    :type error: ~azure.servicefabric.models.FabricErrorError
    """

@@ -10708,182 +9928,184 @@ class FabricError(msrest.serialization.Model):
         'error': {'key': 'Error', 'type': 'FabricErrorError'},
     }

-    def __init__(
-        self,
-        **kwargs
-    ):
+    def __init__(self, **kwargs):
         super(FabricError, self).__init__(**kwargs)
-        self.error = kwargs['error']
+        self.error = kwargs.get('error', None)
+
+
+class FabricErrorException(HttpOperationError):
+    """Server responded with exception of type: 'FabricError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, deserialize, response, *args):
+
+        super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args)


-class FabricErrorError(msrest.serialization.Model):
+class FabricErrorError(Model):
    """Error object containing error code and error message.

    All required parameters must be populated in order to send to Azure.

-    :param code: Required. Defines the fabric error codes that be returned as part of the error
-      object in response to Service Fabric API operations that are not successful. Following are the
-      error code values that can be returned for a specific HTTP status code.
- - - * - Possible values of the error code for HTTP status code 400 (Bad Request) - - - * "FABRIC_E_INVALID_PARTITION_KEY" - * "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - * "FABRIC_E_INVALID_ADDRESS" - * "FABRIC_E_APPLICATION_NOT_UPGRADING" - * "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - * "FABRIC_E_FABRIC_NOT_UPGRADING" - * "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - * "FABRIC_E_INVALID_CONFIGURATION" - * "FABRIC_E_INVALID_NAME_URI" - * "FABRIC_E_PATH_TOO_LONG" - * "FABRIC_E_KEY_TOO_LARGE" - * "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - * "FABRIC_E_INVALID_ATOMIC_GROUP" - * "FABRIC_E_VALUE_EMPTY" - * "FABRIC_E_BACKUP_IS_ENABLED" - * "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - * "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - * "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - * "E_INVALIDARG" - - * - Possible values of the error code for HTTP status code 404 (Not Found) - - - * "FABRIC_E_NODE_NOT_FOUND" - * "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - * "FABRIC_E_APPLICATION_NOT_FOUND" - * "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - * "FABRIC_E_SERVICE_DOES_NOT_EXIST" - * "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - * "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - * "FABRIC_E_PARTITION_NOT_FOUND" - * "FABRIC_E_REPLICA_DOES_NOT_EXIST" - * "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - * "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - * "FABRIC_E_DIRECTORY_NOT_FOUND" - * "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - * "FABRIC_E_FILE_NOT_FOUND" - * "FABRIC_E_NAME_DOES_NOT_EXIST" - * "FABRIC_E_PROPERTY_DOES_NOT_EXIST" - * "FABRIC_E_ENUMERATION_COMPLETED" - * "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - * "FABRIC_E_KEY_NOT_FOUND" - * "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - * "FABRIC_E_BACKUP_NOT_ENABLED" - * "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - * "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - * "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - - * - Possible values of the error code for HTTP status code 409 (Conflict) - - - * "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - * 
"FABRIC_E_APPLICATION_ALREADY_EXISTS" - * "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - * "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - * "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - * "FABRIC_E_SERVICE_ALREADY_EXISTS" - * "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - * "FABRIC_E_APPLICATION_TYPE_IN_USE" - * "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - * "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - * "FABRIC_E_FABRIC_VERSION_IN_USE" - * "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - * "FABRIC_E_NAME_ALREADY_EXISTS" - * "FABRIC_E_NAME_NOT_EMPTY" - * "FABRIC_E_PROPERTY_CHECK_FAILED" - * "FABRIC_E_SERVICE_METADATA_MISMATCH" - * "FABRIC_E_SERVICE_TYPE_MISMATCH" - * "FABRIC_E_HEALTH_STALE_REPORT" - * "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - * "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - * "FABRIC_E_INSTANCE_ID_MISMATCH" - * "FABRIC_E_BACKUP_IN_PROGRESS" - * "FABRIC_E_RESTORE_IN_PROGRESS" - * "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - - * - Possible values of the error code for HTTP status code 413 (Request Entity Too Large) - - - * "FABRIC_E_VALUE_TOO_LARGE" - - * - Possible values of the error code for HTTP status code 500 (Internal Server Error) - - - * "FABRIC_E_NODE_IS_UP" - * "E_FAIL" - * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - * "FABRIC_E_VOLUME_ALREADY_EXISTS" - * "FABRIC_E_VOLUME_NOT_FOUND" - * "SerializationError" - - * - Possible values of the error code for HTTP status code 503 (Service Unavailable) - - - * "FABRIC_E_NO_WRITE_QUORUM" - * "FABRIC_E_NOT_PRIMARY" - * "FABRIC_E_NOT_READY" - * "FABRIC_E_RECONFIGURATION_PENDING" - * "FABRIC_E_SERVICE_OFFLINE" - * "E_ABORT" - * "FABRIC_E_VALUE_TOO_LARGE" - - * - Possible values of the error code for HTTP status code 504 (Gateway Timeout) - - - * "FABRIC_E_COMMUNICATION_ERROR" - * "FABRIC_E_OPERATION_NOT_COMPLETE" - * "FABRIC_E_TIMEOUT". 
Possible values include: "FABRIC_E_INVALID_PARTITION_KEY", - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR", "FABRIC_E_INVALID_ADDRESS", - "FABRIC_E_APPLICATION_NOT_UPGRADING", "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR", - "FABRIC_E_FABRIC_NOT_UPGRADING", "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR", - "FABRIC_E_INVALID_CONFIGURATION", "FABRIC_E_INVALID_NAME_URI", "FABRIC_E_PATH_TOO_LONG", - "FABRIC_E_KEY_TOO_LARGE", "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED", - "FABRIC_E_INVALID_ATOMIC_GROUP", "FABRIC_E_VALUE_EMPTY", "FABRIC_E_NODE_NOT_FOUND", - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND", "FABRIC_E_APPLICATION_NOT_FOUND", - "FABRIC_E_SERVICE_TYPE_NOT_FOUND", "FABRIC_E_SERVICE_DOES_NOT_EXIST", - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND", "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND", - "FABRIC_E_PARTITION_NOT_FOUND", "FABRIC_E_REPLICA_DOES_NOT_EXIST", - "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST", "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND", - "FABRIC_E_DIRECTORY_NOT_FOUND", "FABRIC_E_FABRIC_VERSION_NOT_FOUND", "FABRIC_E_FILE_NOT_FOUND", - "FABRIC_E_NAME_DOES_NOT_EXIST", "FABRIC_E_PROPERTY_DOES_NOT_EXIST", - "FABRIC_E_ENUMERATION_COMPLETED", "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND", - "FABRIC_E_KEY_NOT_FOUND", "FABRIC_E_HEALTH_ENTITY_NOT_FOUND", - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS", "FABRIC_E_APPLICATION_ALREADY_EXISTS", - "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION", - "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS", "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS", - "FABRIC_E_SERVICE_ALREADY_EXISTS", "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS", - "FABRIC_E_APPLICATION_TYPE_IN_USE", "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION", - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS", "FABRIC_E_FABRIC_VERSION_IN_USE", - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS", "FABRIC_E_NAME_ALREADY_EXISTS", - "FABRIC_E_NAME_NOT_EMPTY", "FABRIC_E_PROPERTY_CHECK_FAILED", - "FABRIC_E_SERVICE_METADATA_MISMATCH", "FABRIC_E_SERVICE_TYPE_MISMATCH", - "FABRIC_E_HEALTH_STALE_REPORT", 
"FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED", - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET", "FABRIC_E_INSTANCE_ID_MISMATCH", - "FABRIC_E_VALUE_TOO_LARGE", "FABRIC_E_NO_WRITE_QUORUM", "FABRIC_E_NOT_PRIMARY", - "FABRIC_E_NOT_READY", "FABRIC_E_RECONFIGURATION_PENDING", "FABRIC_E_SERVICE_OFFLINE", - "E_ABORT", "FABRIC_E_COMMUNICATION_ERROR", "FABRIC_E_OPERATION_NOT_COMPLETE", - "FABRIC_E_TIMEOUT", "FABRIC_E_NODE_IS_UP", "E_FAIL", "FABRIC_E_BACKUP_IS_ENABLED", - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH", "FABRIC_E_INVALID_FOR_STATELESS_SERVICES", - "FABRIC_E_BACKUP_NOT_ENABLED", "FABRIC_E_BACKUP_POLICY_NOT_EXISTING", - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING", "FABRIC_E_BACKUP_IN_PROGRESS", - "FABRIC_E_RESTORE_IN_PROGRESS", "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING", - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY", "E_INVALIDARG", - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS", - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND", "FABRIC_E_VOLUME_ALREADY_EXISTS", - "FABRIC_E_VOLUME_NOT_FOUND", "SerializationError", - "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR". + :param code: Required. Defines the fabric error codes that be returned as + part of the error object in response to Service Fabric API operations that + are not successful. Following are the error code values that can be + returned for a specific HTTP status code. 
+ - Possible values of the error code for HTTP status code 400 (Bad Request) + - "FABRIC_E_INVALID_PARTITION_KEY" + - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + - "FABRIC_E_INVALID_ADDRESS" + - "FABRIC_E_APPLICATION_NOT_UPGRADING" + - "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + - "FABRIC_E_FABRIC_NOT_UPGRADING" + - "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + - "FABRIC_E_INVALID_CONFIGURATION" + - "FABRIC_E_INVALID_NAME_URI" + - "FABRIC_E_PATH_TOO_LONG" + - "FABRIC_E_KEY_TOO_LARGE" + - "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + - "FABRIC_E_INVALID_ATOMIC_GROUP" + - "FABRIC_E_VALUE_EMPTY" + - "FABRIC_E_BACKUP_IS_ENABLED" + - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + - "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + - "E_INVALIDARG" + - Possible values of the error code for HTTP status code 404 (Not Found) + - "FABRIC_E_NODE_NOT_FOUND" + - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + - "FABRIC_E_APPLICATION_NOT_FOUND" + - "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + - "FABRIC_E_SERVICE_DOES_NOT_EXIST" + - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + - "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + - "FABRIC_E_PARTITION_NOT_FOUND" + - "FABRIC_E_REPLICA_DOES_NOT_EXIST" + - "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + - "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + - "FABRIC_E_DIRECTORY_NOT_FOUND" + - "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + - "FABRIC_E_FILE_NOT_FOUND" + - "FABRIC_E_NAME_DOES_NOT_EXIST" + - "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + - "FABRIC_E_ENUMERATION_COMPLETED" + - "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + - "FABRIC_E_KEY_NOT_FOUND" + - "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + - "FABRIC_E_BACKUP_NOT_ENABLED" + - "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + - "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + - Possible values of the error code for HTTP status code 409 (Conflict) + - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_ALREADY_EXISTS" + 
- "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + - "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + - "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + - "FABRIC_E_SERVICE_ALREADY_EXISTS" + - "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_TYPE_IN_USE" + - "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + - "FABRIC_E_FABRIC_VERSION_IN_USE" + - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + - "FABRIC_E_NAME_ALREADY_EXISTS" + - "FABRIC_E_NAME_NOT_EMPTY" + - "FABRIC_E_PROPERTY_CHECK_FAILED" + - "FABRIC_E_SERVICE_METADATA_MISMATCH" + - "FABRIC_E_SERVICE_TYPE_MISMATCH" + - "FABRIC_E_HEALTH_STALE_REPORT" + - "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + - "FABRIC_E_INSTANCE_ID_MISMATCH" + - "FABRIC_E_BACKUP_IN_PROGRESS" + - "FABRIC_E_RESTORE_IN_PROGRESS" + - "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + - Possible values of the error code for HTTP status code 413 (Request + Entity Too Large) + - "FABRIC_E_VALUE_TOO_LARGE" + - Possible values of the error code for HTTP status code 500 (Internal + Server Error) + - "FABRIC_E_NODE_IS_UP" + - "E_FAIL" + - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + - "FABRIC_E_VOLUME_ALREADY_EXISTS" + - "FABRIC_E_VOLUME_NOT_FOUND" + - "SerializationError" + - Possible values of the error code for HTTP status code 503 (Service + Unavailable) + - "FABRIC_E_NO_WRITE_QUORUM" + - "FABRIC_E_NOT_PRIMARY" + - "FABRIC_E_NOT_READY" + - "FABRIC_E_RECONFIGURATION_PENDING" + - "FABRIC_E_SERVICE_OFFLINE" + - "E_ABORT" + - "FABRIC_E_VALUE_TOO_LARGE" + - Possible values of the error code for HTTP status code 504 (Gateway + Timeout) + - "FABRIC_E_COMMUNICATION_ERROR" + - "FABRIC_E_OPERATION_NOT_COMPLETE" + - "FABRIC_E_TIMEOUT". 
Possible values include: + 'FABRIC_E_INVALID_PARTITION_KEY', + 'FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR', 'FABRIC_E_INVALID_ADDRESS', + 'FABRIC_E_APPLICATION_NOT_UPGRADING', + 'FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR', + 'FABRIC_E_FABRIC_NOT_UPGRADING', + 'FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR', + 'FABRIC_E_INVALID_CONFIGURATION', 'FABRIC_E_INVALID_NAME_URI', + 'FABRIC_E_PATH_TOO_LONG', 'FABRIC_E_KEY_TOO_LARGE', + 'FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED', + 'FABRIC_E_INVALID_ATOMIC_GROUP', 'FABRIC_E_VALUE_EMPTY', + 'FABRIC_E_NODE_NOT_FOUND', 'FABRIC_E_APPLICATION_TYPE_NOT_FOUND', + 'FABRIC_E_APPLICATION_NOT_FOUND', 'FABRIC_E_SERVICE_TYPE_NOT_FOUND', + 'FABRIC_E_SERVICE_DOES_NOT_EXIST', + 'FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND', + 'FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND', + 'FABRIC_E_PARTITION_NOT_FOUND', 'FABRIC_E_REPLICA_DOES_NOT_EXIST', + 'FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST', + 'FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND', + 'FABRIC_E_DIRECTORY_NOT_FOUND', 'FABRIC_E_FABRIC_VERSION_NOT_FOUND', + 'FABRIC_E_FILE_NOT_FOUND', 'FABRIC_E_NAME_DOES_NOT_EXIST', + 'FABRIC_E_PROPERTY_DOES_NOT_EXIST', 'FABRIC_E_ENUMERATION_COMPLETED', + 'FABRIC_E_SERVICE_MANIFEST_NOT_FOUND', 'FABRIC_E_KEY_NOT_FOUND', + 'FABRIC_E_HEALTH_ENTITY_NOT_FOUND', + 'FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION', + 'FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS', + 'FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS', + 'FABRIC_E_SERVICE_ALREADY_EXISTS', + 'FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_TYPE_IN_USE', + 'FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION', + 'FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS', + 'FABRIC_E_FABRIC_VERSION_IN_USE', 'FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS', + 'FABRIC_E_NAME_ALREADY_EXISTS', 'FABRIC_E_NAME_NOT_EMPTY', + 'FABRIC_E_PROPERTY_CHECK_FAILED', 'FABRIC_E_SERVICE_METADATA_MISMATCH', + 'FABRIC_E_SERVICE_TYPE_MISMATCH', 'FABRIC_E_HEALTH_STALE_REPORT', + 
'FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED', + 'FABRIC_E_NODE_HAS_NOT_STOPPED_YET', 'FABRIC_E_INSTANCE_ID_MISMATCH', + 'FABRIC_E_VALUE_TOO_LARGE', 'FABRIC_E_NO_WRITE_QUORUM', + 'FABRIC_E_NOT_PRIMARY', 'FABRIC_E_NOT_READY', + 'FABRIC_E_RECONFIGURATION_PENDING', 'FABRIC_E_SERVICE_OFFLINE', 'E_ABORT', + 'FABRIC_E_COMMUNICATION_ERROR', 'FABRIC_E_OPERATION_NOT_COMPLETE', + 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP', 'E_FAIL', + 'FABRIC_E_BACKUP_IS_ENABLED', + 'FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH', + 'FABRIC_E_INVALID_FOR_STATELESS_SERVICES', 'FABRIC_E_BACKUP_NOT_ENABLED', + 'FABRIC_E_BACKUP_POLICY_NOT_EXISTING', + 'FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING', + 'FABRIC_E_BACKUP_IN_PROGRESS', 'FABRIC_E_RESTORE_IN_PROGRESS', + 'FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING', + 'FABRIC_E_INVALID_SERVICE_SCALING_POLICY', 'E_INVALIDARG', + 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS', + 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND', + 'FABRIC_E_VOLUME_ALREADY_EXISTS', 'FABRIC_E_VOLUME_NOT_FOUND', + 'SerializationError', 'FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR' :type code: str or ~azure.servicefabric.models.FabricErrorCodes :param message: Error message. :type message: str @@ -10898,27 +10120,22 @@ class FabricErrorError(msrest.serialization.Model): 'message': {'key': 'Message', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FabricErrorError, self).__init__(**kwargs) - self.code = kwargs['code'] + self.code = kwargs.get('code', None) self.message = kwargs.get('message', None) -class PropertyBatchInfo(msrest.serialization.Model): +class PropertyBatchInfo(Model): """Information about the results of a property batch. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FailedPropertyBatchInfo, SuccessfulPropertyBatchInfo. + sub-classes are: SuccessfulPropertyBatchInfo, FailedPropertyBatchInfo All required parameters must be populated in order to send to Azure. 
- :param kind: Required. The kind of property batch info, determined by the results of a property - batch. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Successful", "Failed". - :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -10930,30 +10147,28 @@ class PropertyBatchInfo(msrest.serialization.Model): } _subtype_map = { - 'kind': {'Failed': 'FailedPropertyBatchInfo', 'Successful': 'SuccessfulPropertyBatchInfo'} + 'kind': {'Successful': 'SuccessfulPropertyBatchInfo', 'Failed': 'FailedPropertyBatchInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyBatchInfo, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class FailedPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch failing. Contains information about the specific batch failure. + """Derived from PropertyBatchInfo. Represents the property batch failing. + Contains information about the specific batch failure. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch info, determined by the results of a property - batch. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Successful", "Failed". - :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind - :param error_message: The error message of the failed operation. Describes the exception thrown - due to the first unsuccessful operation in the property batch. + :param kind: Required. Constant filled by server. + :type kind: str + :param error_message: The error message of the failed operation. Describes + the exception thrown due to the first unsuccessful operation in the + property batch. 
:type error_message: str - :param operation_index: The index of the unsuccessful operation in the property batch. + :param operation_index: The index of the unsuccessful operation in the + property batch. :type operation_index: int """ @@ -10967,23 +10182,23 @@ class FailedPropertyBatchInfo(PropertyBatchInfo): 'operation_index': {'key': 'OperationIndex', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FailedPropertyBatchInfo, self).__init__(**kwargs) - self.kind = 'Failed' # type: str self.error_message = kwargs.get('error_message', None) self.operation_index = kwargs.get('operation_index', None) + self.kind = 'Failed' -class FailedUpgradeDomainProgressObject(msrest.serialization.Model): - """The detailed upgrade progress for nodes in the current upgrade domain at the point of failure. +class FailedUpgradeDomainProgressObject(Model): + """The detailed upgrade progress for nodes in the current upgrade domain at + the point of failure. - :param domain_name: The name of the upgrade domain. + :param domain_name: The name of the upgrade domain :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
- :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -10991,22 +10206,22 @@ class FailedUpgradeDomainProgressObject(msrest.serialization.Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FailedUpgradeDomainProgressObject, self).__init__(**kwargs) self.domain_name = kwargs.get('domain_name', None) self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) -class FailureUpgradeDomainProgressInfo(msrest.serialization.Model): - """Information about the upgrade domain progress at the time of upgrade failure. +class FailureUpgradeDomainProgressInfo(Model): + """Information about the upgrade domain progress at the time of upgrade + failure. - :param domain_name: The name of the upgrade domain. + :param domain_name: The name of the upgrade domain :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
- :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -11014,25 +10229,24 @@ class FailureUpgradeDomainProgressInfo(msrest.serialization.Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FailureUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = kwargs.get('domain_name', None) self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) -class FileInfo(msrest.serialization.Model): +class FileInfo(Model): """Information about a image store file. :param file_size: The size of file in bytes. :type file_size: str :param file_version: Information about the version of image store file. :type file_version: ~azure.servicefabric.models.FileVersion - :param modified_date: The date and time when the image store file was last modified. - :type modified_date: ~datetime.datetime - :param store_relative_path: The file path relative to the image store root path. + :param modified_date: The date and time when the image store file was last + modified. + :type modified_date: datetime + :param store_relative_path: The file path relative to the image store root + path. 
:type store_relative_path: str """ @@ -11043,10 +10257,7 @@ class FileInfo(msrest.serialization.Model): 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FileInfo, self).__init__(**kwargs) self.file_size = kwargs.get('file_size', None) self.file_version = kwargs.get('file_version', None) @@ -11055,17 +10266,17 @@ def __init__( class FileShareBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for file share storage used for storing or enumerating backups. + """Describes the parameters for file share storage used for storing or + enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param path: Required. UNC path of the file share where to store or enumerate backups from. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param path: Required. UNC path of the file share where to store or + enumerate backups from. :type path: str :param primary_user_name: Primary user name to access the file share. :type primary_user_name: str @@ -11073,7 +10284,7 @@ class FileShareBackupStorageDescription(BackupStorageDescription): :type primary_password: str :param secondary_user_name: Secondary user name to access the file share. :type secondary_user_name: str - :param secondary_password: Secondary password to access the share location. 
+ :param secondary_password: Secondary password to access the share location :type secondary_password: str """ @@ -11083,8 +10294,8 @@ class FileShareBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'path': {'key': 'Path', 'type': 'str'}, 'primary_user_name': {'key': 'PrimaryUserName', 'type': 'str'}, 'primary_password': {'key': 'PrimaryPassword', 'type': 'str'}, @@ -11092,30 +10303,27 @@ class FileShareBackupStorageDescription(BackupStorageDescription): 'secondary_password': {'key': 'SecondaryPassword', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FileShareBackupStorageDescription, self).__init__(**kwargs) - self.storage_kind = 'FileShare' # type: str - self.path = kwargs['path'] + self.path = kwargs.get('path', None) self.primary_user_name = kwargs.get('primary_user_name', None) self.primary_password = kwargs.get('primary_password', None) self.secondary_user_name = kwargs.get('secondary_user_name', None) self.secondary_password = kwargs.get('secondary_password', None) + self.storage_kind = 'FileShare' -class FileVersion(msrest.serialization.Model): +class FileVersion(Model): """Information about the version of image store file. - :param version_number: The current image store version number for the file is used in image - store for checking whether it need to be updated. + :param version_number: The current image store version number for the file + is used in image store for checking whether it need to be updated. :type version_number: str - :param epoch_data_loss_number: The epoch data loss number of image store replica when this file - entry was updated or created. + :param epoch_data_loss_number: The epoch data loss number of image store + replica when this file entry was updated or created. 
:type epoch_data_loss_number: str - :param epoch_configuration_number: The epoch configuration version number of the image store - replica when this file entry was created or updated. + :param epoch_configuration_number: The epoch configuration version number + of the image store replica when this file entry was created or updated. :type epoch_configuration_number: str """ @@ -11125,21 +10333,19 @@ class FileVersion(msrest.serialization.Model): 'epoch_configuration_number': {'key': 'EpochConfigurationNumber', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FileVersion, self).__init__(**kwargs) self.version_number = kwargs.get('version_number', None) self.epoch_data_loss_number = kwargs.get('epoch_data_loss_number', None) self.epoch_configuration_number = kwargs.get('epoch_configuration_number', None) -class FolderInfo(msrest.serialization.Model): - """Information about a image store folder. It includes how many files this folder contains and its image store relative path. +class FolderInfo(Model): + """Information about a image store folder. It includes how many files this + folder contains and its image store relative path. - :param store_relative_path: The remote location within image store. This path is relative to - the image store root. + :param store_relative_path: The remote location within image store. This + path is relative to the image store root. :type store_relative_path: str :param file_count: The number of files from within the image store folder. 
:type file_count: str @@ -11150,20 +10356,17 @@ class FolderInfo(msrest.serialization.Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FolderInfo, self).__init__(**kwargs) self.store_relative_path = kwargs.get('store_relative_path', None) self.file_count = kwargs.get('file_count', None) -class FolderSizeInfo(msrest.serialization.Model): +class FolderSizeInfo(Model): """Information of a image store folder size. - :param store_relative_path: The remote location within image store. This path is relative to - the image store root. + :param store_relative_path: The remote location within image store. This + path is relative to the image store root. :type store_relative_path: str :param folder_size: The size of folder in bytes. :type folder_size: str @@ -11174,10 +10377,7 @@ class FolderSizeInfo(msrest.serialization.Model): 'folder_size': {'key': 'FolderSize', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FolderSizeInfo, self).__init__(**kwargs) self.store_relative_path = kwargs.get('store_relative_path', None) self.folder_size = kwargs.get('folder_size', None) @@ -11188,14 +10388,12 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. The kind of backup schedule, time based or frequency - based.Constant filled by server. Possible values include: "Invalid", "TimeBased", - "FrequencyBased". - :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind - :param interval: Required. Defines the interval with which backups are periodically taken. It - should be specified in ISO8601 format. Timespan in seconds is not supported and will be ignored - while creating the policy. - :type interval: ~datetime.timedelta + :param schedule_kind: Required. Constant filled by server. 
+ :type schedule_kind: str + :param interval: Required. Defines the interval with which backups are + periodically taken. It should be specified in ISO8601 format. Timespan in + seconds is not supported and will be ignored while creating the policy. + :type interval: timedelta """ _validation = { @@ -11208,21 +10406,19 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): 'interval': {'key': 'Interval', 'type': 'duration'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(FrequencyBasedBackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = 'FrequencyBased' # type: str - self.interval = kwargs['interval'] + self.interval = kwargs.get('interval', None) + self.schedule_kind = 'FrequencyBased' -class GatewayDestination(msrest.serialization.Model): +class GatewayDestination(Model): """Describes destination endpoint for routing traffic. All required parameters must be populated in order to send to Azure. - :param application_name: Required. Name of the service fabric Mesh application. + :param application_name: Required. Name of the service fabric Mesh + application. :type application_name: str :param service_name: Required. service that contains the endpoint. 
:type service_name: str @@ -11242,20 +10438,18 @@ class GatewayDestination(msrest.serialization.Model): 'endpoint_name': {'key': 'endpointName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(GatewayDestination, self).__init__(**kwargs) - self.application_name = kwargs['application_name'] - self.service_name = kwargs['service_name'] - self.endpoint_name = kwargs['endpoint_name'] + self.application_name = kwargs.get('application_name', None) + self.service_name = kwargs.get('service_name', None) + self.endpoint_name = kwargs.get('endpoint_name', None) -class GatewayResourceDescription(msrest.serialization.Model): +class GatewayResourceDescription(Model): """This type describes a gateway resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. @@ -11263,21 +10457,24 @@ class GatewayResourceDescription(msrest.serialization.Model): :type name: str :param description: User readable description of the gateway. :type description: str - :param source_network: Required. Network the gateway should listen on for requests. + :param source_network: Required. Network the gateway should listen on for + requests. :type source_network: ~azure.servicefabric.models.NetworkRef - :param destination_network: Required. Network that the Application is using. + :param destination_network: Required. Network that the Application is + using. :type destination_network: ~azure.servicefabric.models.NetworkRef :param tcp: Configuration for tcp connectivity for this gateway. :type tcp: list[~azure.servicefabric.models.TcpConfig] :param http: Configuration for http connectivity for this gateway. :type http: list[~azure.servicefabric.models.HttpConfig] - :ivar status: Status of the resource. 
Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the resource. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the gateway. + :ivar status_details: Gives additional information about the current + status of the gateway. :vartype status_details: str - :ivar ip_address: IP address of the gateway. This is populated in the response and is ignored - for incoming requests. + :ivar ip_address: IP address of the gateway. This is populated in the + response and is ignored for incoming requests. :vartype ip_address: str """ @@ -11302,15 +10499,12 @@ class GatewayResourceDescription(msrest.serialization.Model): 'ip_address': {'key': 'properties.ipAddress', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(GatewayResourceDescription, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) - self.source_network = kwargs['source_network'] - self.destination_network = kwargs['destination_network'] + self.source_network = kwargs.get('source_network', None) + self.destination_network = kwargs.get('destination_network', None) self.tcp = kwargs.get('tcp', None) self.http = kwargs.get('http', None) self.status = None @@ -11318,27 +10512,33 @@ def __init__( self.ip_address = None -class GetBackupByStorageQueryDescription(msrest.serialization.Model): - """Describes additional filters to be applied, while listing backups, and backup storage details from where to fetch the backups. +class GetBackupByStorageQueryDescription(Model): + """Describes additional filters to be applied, while listing backups, and + backup storage details from where to fetch the backups. 
All required parameters must be populated in order to send to Azure. - :param start_date_time_filter: Specifies the start date time in ISO8601 from which to enumerate - backups. If not specified, backups are enumerated from the beginning. - :type start_date_time_filter: ~datetime.datetime - :param end_date_time_filter: Specifies the end date time in ISO8601 till which to enumerate - backups. If not specified, backups are enumerated till the end. - :type end_date_time_filter: ~datetime.datetime - :param latest: If specified as true, gets the most recent backup (within the specified time - range) for every partition under the specified backup entity. + :param start_date_time_filter: Specifies the start date time in ISO8601 + from which to enumerate backups. If not specified, backups are enumerated + from the beginning. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specifies the end date time in ISO8601 till + which to enumerate backups. If not specified, backups are enumerated till + the end. + :type end_date_time_filter: datetime + :param latest: If specified as true, gets the most recent backup (within + the specified time range) for every partition under the specified backup + entity. Default value: False . :type latest: bool - :param storage: Required. Describes the parameters for the backup storage from where to - enumerate backups. This is optional and by default backups are enumerated from the backup - storage where this backup entity is currently being backed up (as specified in backup policy). - This parameter is useful to be able to enumerate backups from another cluster where you may - intend to restore. + :param storage: Required. Describes the parameters for the backup storage + from where to enumerate backups. This is optional and by default backups + are enumerated from the backup storage where this backup entity is + currently being backed up (as specified in backup policy). 
This parameter + is useful to be able to enumerate backups from another cluster where you + may intend to restore. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param backup_entity: Required. Indicates the entity for which to enumerate backups. + :param backup_entity: Required. Indicates the entity for which to + enumerate backups. :type backup_entity: ~azure.servicefabric.models.BackupEntity """ @@ -11355,54 +10555,49 @@ class GetBackupByStorageQueryDescription(msrest.serialization.Model): 'backup_entity': {'key': 'BackupEntity', 'type': 'BackupEntity'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(GetBackupByStorageQueryDescription, self).__init__(**kwargs) self.start_date_time_filter = kwargs.get('start_date_time_filter', None) self.end_date_time_filter = kwargs.get('end_date_time_filter', None) self.latest = kwargs.get('latest', False) - self.storage = kwargs['storage'] - self.backup_entity = kwargs['backup_entity'] + self.storage = kwargs.get('storage', None) + self.backup_entity = kwargs.get('backup_entity', None) class GetPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that gets the specified property if it exists. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that gets the specified property if it + exists. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". 
- :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param include_value: Whether or not to return the property value with the metadata. - True if values should be returned with the metadata; False to return only property metadata. + :param kind: Required. Constant filled by server. + :type kind: str + :param include_value: Whether or not to return the property value with the + metadata. + True if values should be returned with the metadata; False to return only + property metadata. Default value: False . :type include_value: bool """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'include_value': {'key': 'IncludeValue', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(GetPropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'Get' # type: str self.include_value = kwargs.get('include_value', False) + self.kind = 'Get' class GuidPropertyValue(PropertyValue): @@ -11410,10 +10605,8 @@ class GuidPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. + :type kind: str :param data: Required. The data of the property value. 
:type data: str """ @@ -11428,20 +10621,18 @@ class GuidPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(GuidPropertyValue, self).__init__(**kwargs) - self.kind = 'Guid' # type: str - self.data = kwargs['data'] + self.data = kwargs.get('data', None) + self.kind = 'Guid' -class HealthEvaluationWrapper(msrest.serialization.Model): +class HealthEvaluationWrapper(Model): """Wrapper object for health evaluation. - :param health_evaluation: Represents a health evaluation which describes the data and the - algorithm used by health manager to evaluate the health of an entity. + :param health_evaluation: Represents a health evaluation which describes + the data and the algorithm used by health manager to evaluate the health + of an entity. :type health_evaluation: ~azure.servicefabric.models.HealthEvaluation """ @@ -11449,79 +10640,86 @@ class HealthEvaluationWrapper(msrest.serialization.Model): 'health_evaluation': {'key': 'HealthEvaluation', 'type': 'HealthEvaluation'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HealthEvaluationWrapper, self).__init__(**kwargs) self.health_evaluation = kwargs.get('health_evaluation', None) -class HealthInformation(msrest.serialization.Model): - """Represents common health report information. It is included in all health reports sent to health store and in all health events returned by health queries. +class HealthInformation(Model): + """Represents common health report information. It is included in all health + reports sent to health store and in all health events returned by health + queries. All required parameters must be populated in order to send to Azure. - :param source_id: Required. The source name that identifies the client/watchdog/system - component that generated the health information. + :param source_id: Required. 
The source name that identifies the + client/watchdog/system component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An entity can have health - reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter flexibility to - categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available - disk on a node, + :param property: Required. The property of the health information. An + entity can have health reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter + flexibility to categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the + state of the available disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a property - "Connectivity" on the same node. - In the health store, these reports are treated as separate health events for the specified - node. - - Together with the SourceId, the property uniquely identifies the health information. + The same reporter can monitor the node connectivity, so it can report a + property "Connectivity" on the same node. + In the health store, these reports are treated as separate health events + for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This - field uses ISO8601 format for specifying the duration. - When clients report periodically, they should send reports with higher frequency than time to - live. - If clients report on transition, they can set the time to live to infinite. - When time to live expires, the health event that contains the health information - is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if - RemoveWhenExpired false. - + :param time_to_live_in_milli_seconds: The duration for which this health + report is valid. This field uses ISO8601 format for specifying the + duration. + When clients report periodically, they should send reports with higher + frequency than time to live. + If clients report on transition, they can set the time to live to + infinite. + When time to live expires, the health event that contains the health + information + is either removed from health store, if RemoveWhenExpired is true, or + evaluated at error, if RemoveWhenExpired false. If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: ~datetime.timedelta - :param description: The description of the health information. It represents free text used to - add human readable information about the report. + :type time_to_live_in_milli_seconds: timedelta + :param description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker "[Truncated]", and - total string size is 4096 characters. 
+ When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters from the original - string. + Note that when truncated, the description has less than 4096 characters + from the original string. :type description: str - :param sequence_number: The sequence number for this health report as a numeric string. - The report sequence number is used by the health store to detect stale reports. - If not specified, a sequence number is auto-generated by the health client when a report is - added. + :param sequence_number: The sequence number for this health report as a + numeric string. + The report sequence number is used by the health store to detect stale + reports. + If not specified, a sequence number is auto-generated by the health client + when a report is added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is removed from health - store when it expires. - If set to true, the report is removed from the health store after it expires. - If set to false, the report is treated as an error when expired. The value of this property is - false by default. - When clients report periodically, they should set RemoveWhenExpired false (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated - at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. + If set to true, the report is removed from the health store after it + expires. + If set to false, the report is treated as an error when expired. The value + of this property is false by default. + When clients report periodically, they should set RemoveWhenExpired false + (default). 
+ This way, if the reporter has issues (e.g. deadlock) and can't report, the + entity is evaluated at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health report and can be used - to find more detailed information about a specific health event at - aka.ms/sfhealthid. + :param health_report_id: A health report ID which identifies the health + report and can be used to find more detailed information about a specific + health event at + aka.ms/sfhealthid :type health_report_id: str """ @@ -11542,14 +10740,11 @@ class HealthInformation(msrest.serialization.Model): 'health_report_id': {'key': 'HealthReportId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HealthInformation, self).__init__(**kwargs) - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) self.time_to_live_in_milli_seconds = kwargs.get('time_to_live_in_milli_seconds', None) self.description = kwargs.get('description', None) self.sequence_number = kwargs.get('sequence_number', None) @@ -11558,108 +10753,121 @@ def __init__( class HealthEvent(HealthInformation): - """Represents health information reported on a health entity, such as cluster, application or node, with additional metadata added by the Health Manager. + """Represents health information reported on a health entity, such as cluster, + application or node, with additional metadata added by the Health Manager. All required parameters must be populated in order to send to Azure. - :param source_id: Required. The source name that identifies the client/watchdog/system - component that generated the health information. + :param source_id: Required. 
The source name that identifies the + client/watchdog/system component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An entity can have health - reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter flexibility to - categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available - disk on a node, + :param property: Required. The property of the health information. An + entity can have health reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter + flexibility to categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the + state of the available disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a property - "Connectivity" on the same node. - In the health store, these reports are treated as separate health events for the specified - node. - - Together with the SourceId, the property uniquely identifies the health information. + The same reporter can monitor the node connectivity, so it can report a + property "Connectivity" on the same node. + In the health store, these reports are treated as separate health events + for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This - field uses ISO8601 format for specifying the duration. - When clients report periodically, they should send reports with higher frequency than time to - live. - If clients report on transition, they can set the time to live to infinite. - When time to live expires, the health event that contains the health information - is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if - RemoveWhenExpired false. - + :param time_to_live_in_milli_seconds: The duration for which this health + report is valid. This field uses ISO8601 format for specifying the + duration. + When clients report periodically, they should send reports with higher + frequency than time to live. + If clients report on transition, they can set the time to live to + infinite. + When time to live expires, the health event that contains the health + information + is either removed from health store, if RemoveWhenExpired is true, or + evaluated at error, if RemoveWhenExpired false. If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: ~datetime.timedelta - :param description: The description of the health information. It represents free text used to - add human readable information about the report. + :type time_to_live_in_milli_seconds: timedelta + :param description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker "[Truncated]", and - total string size is 4096 characters. 
+ When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters from the original - string. + Note that when truncated, the description has less than 4096 characters + from the original string. :type description: str - :param sequence_number: The sequence number for this health report as a numeric string. - The report sequence number is used by the health store to detect stale reports. - If not specified, a sequence number is auto-generated by the health client when a report is - added. + :param sequence_number: The sequence number for this health report as a + numeric string. + The report sequence number is used by the health store to detect stale + reports. + If not specified, a sequence number is auto-generated by the health client + when a report is added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is removed from health - store when it expires. - If set to true, the report is removed from the health store after it expires. - If set to false, the report is treated as an error when expired. The value of this property is - false by default. - When clients report periodically, they should set RemoveWhenExpired false (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated - at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. + If set to true, the report is removed from the health store after it + expires. + If set to false, the report is treated as an error when expired. The value + of this property is false by default. + When clients report periodically, they should set RemoveWhenExpired false + (default). 
+ This way, if the reporter has issues (e.g. deadlock) and can't report, the + entity is evaluated at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health report and can be used - to find more detailed information about a specific health event at - aka.ms/sfhealthid. + :param health_report_id: A health report ID which identifies the health + report and can be used to find more detailed information about a specific + health event at + aka.ms/sfhealthid :type health_report_id: str - :param is_expired: Returns true if the health event is expired, otherwise false. + :param is_expired: Returns true if the health event is expired, otherwise + false. :type is_expired: bool - :param source_utc_timestamp: The date and time when the health report was sent by the source. - :type source_utc_timestamp: ~datetime.datetime - :param last_modified_utc_timestamp: The date and time when the health report was last modified - by the health store. - :type last_modified_utc_timestamp: ~datetime.datetime - :param last_ok_transition_at: If the current health state is 'Ok', this property returns the - time at which the health report was first reported with 'Ok'. - For periodic reporting, many reports with the same state may have been generated. - This property returns the date and time when the first 'Ok' health report was received. - - If the current health state is 'Error' or 'Warning', returns the date and time at which the - health state was last in 'Ok', before transitioning to a different state. - + :param source_utc_timestamp: The date and time when the health report was + sent by the source. + :type source_utc_timestamp: datetime + :param last_modified_utc_timestamp: The date and time when the health + report was last modified by the health store. 
+ :type last_modified_utc_timestamp: datetime + :param last_ok_transition_at: If the current health state is 'Ok', this + property returns the time at which the health report was first reported + with 'Ok'. + For periodic reporting, many reports with the same state may have been + generated. + This property returns the date and time when the first 'Ok' health report + was received. + If the current health state is 'Error' or 'Warning', returns the date and + time at which the health state was last in 'Ok', before transitioning to a + different state. If the health state was never 'Ok', the value will be zero date-time. - :type last_ok_transition_at: ~datetime.datetime - :param last_warning_transition_at: If the current health state is 'Warning', this property - returns the time at which the health report was first reported with 'Warning'. For periodic - reporting, many reports with the same state may have been generated however, this property - returns only the date and time at the first 'Warning' health report was received. - - If the current health state is 'Ok' or 'Error', returns the date and time at which the health - state was last in 'Warning', before transitioning to a different state. - + :type last_ok_transition_at: datetime + :param last_warning_transition_at: If the current health state is + 'Warning', this property returns the time at which the health report was + first reported with 'Warning'. For periodic reporting, many reports with + the same state may have been generated however, this property returns only + the date and time at the first 'Warning' health report was received. + If the current health state is 'Ok' or 'Error', returns the date and time + at which the health state was last in 'Warning', before transitioning to a + different state. If the health state was never 'Warning', the value will be zero date-time. 
- :type last_warning_transition_at: ~datetime.datetime - :param last_error_transition_at: If the current health state is 'Error', this property returns - the time at which the health report was first reported with 'Error'. For periodic reporting, - many reports with the same state may have been generated however, this property returns only - the date and time at the first 'Error' health report was received. - - If the current health state is 'Ok' or 'Warning', returns the date and time at which the - health state was last in 'Error', before transitioning to a different state. - + :type last_warning_transition_at: datetime + :param last_error_transition_at: If the current health state is 'Error', + this property returns the time at which the health report was first + reported with 'Error'. For periodic reporting, many reports with the same + state may have been generated however, this property returns only the date + and time at the first 'Error' health report was received. + If the current health state is 'Ok' or 'Warning', returns the date and + time at which the health state was last in 'Error', before transitioning + to a different state. If the health state was never 'Error', the value will be zero date-time. 
- :type last_error_transition_at: ~datetime.datetime + :type last_error_transition_at: datetime """ _validation = { @@ -11685,10 +10893,7 @@ class HealthEvent(HealthInformation): 'last_error_transition_at': {'key': 'LastErrorTransitionAt', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HealthEvent, self).__init__(**kwargs) self.is_expired = kwargs.get('is_expired', None) self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) @@ -11698,14 +10903,18 @@ def __init__( self.last_error_transition_at = kwargs.get('last_error_transition_at', None) -class HealthStateCount(msrest.serialization.Model): - """Represents information about how many health entities are in Ok, Warning and Error health state. +class HealthStateCount(Model): + """Represents information about how many health entities are in Ok, Warning + and Error health state. - :param ok_count: The number of health entities with aggregated health state Ok. + :param ok_count: The number of health entities with aggregated health + state Ok. :type ok_count: long - :param warning_count: The number of health entities with aggregated health state Warning. + :param warning_count: The number of health entities with aggregated health + state Warning. :type warning_count: long - :param error_count: The number of health entities with aggregated health state Error. + :param error_count: The number of health entities with aggregated health + state Error. 
:type error_count: long """ @@ -11721,48 +10930,49 @@ class HealthStateCount(msrest.serialization.Model): 'error_count': {'key': 'ErrorCount', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HealthStateCount, self).__init__(**kwargs) self.ok_count = kwargs.get('ok_count', None) self.warning_count = kwargs.get('warning_count', None) self.error_count = kwargs.get('error_count', None) -class HealthStatistics(msrest.serialization.Model): - """The health statistics of an entity, returned as part of the health query result when the query description is configured to include statistics. -The statistics include health state counts for all children types of the current entity. -For example, for cluster, the health statistics include health state counts for nodes, applications, services, partitions, replicas, deployed applications and deployed service packages. -For partition, the health statistics include health counts for replicas. +class HealthStatistics(Model): + """The health statistics of an entity, returned as part of the health query + result when the query description is configured to include statistics. + The statistics include health state counts for all children types of the + current entity. + For example, for cluster, the health statistics include health state counts + for nodes, applications, services, partitions, replicas, deployed + applications and deployed service packages. + For partition, the health statistics include health counts for replicas. - :param health_state_count_list: List of health state counts per entity kind, which keeps track - of how many children of the queried entity are in Ok, Warning and Error state. - :type health_state_count_list: list[~azure.servicefabric.models.EntityKindHealthStateCount] + :param health_state_count_list: List of health state counts per entity + kind, which keeps track of how many children of the queried entity are in + Ok, Warning and Error state. 
+ :type health_state_count_list: + list[~azure.servicefabric.models.EntityKindHealthStateCount] """ _attribute_map = { 'health_state_count_list': {'key': 'HealthStateCountList', 'type': '[EntityKindHealthStateCount]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HealthStatistics, self).__init__(**kwargs) self.health_state_count_list = kwargs.get('health_state_count_list', None) -class HttpConfig(msrest.serialization.Model): - """Describes the http configuration for external connectivity for this network. +class HttpConfig(Model): + """Describes the http configuration for external connectivity for this + network. All required parameters must be populated in order to send to Azure. :param name: Required. http gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint below needs to be - exposed. + :param port: Required. Specifies the port at which the service endpoint + below needs to be exposed. :type port: int :param hosts: Required. description for routing. :type hosts: list[~azure.servicefabric.models.HttpHostConfig] @@ -11780,26 +10990,23 @@ class HttpConfig(msrest.serialization.Model): 'hosts': {'key': 'hosts', 'type': '[HttpHostConfig]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HttpConfig, self).__init__(**kwargs) - self.name = kwargs['name'] - self.port = kwargs['port'] - self.hosts = kwargs['hosts'] + self.name = kwargs.get('name', None) + self.port = kwargs.get('port', None) + self.hosts = kwargs.get('hosts', None) -class HttpHostConfig(msrest.serialization.Model): +class HttpHostConfig(Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. :param name: Required. http hostname config name. :type name: str - :param routes: Required. Route information to use for routing. Routes are processed in the - order they are specified. 
Specify routes that are more specific before routes that can handle - general cases. + :param routes: Required. Route information to use for routing. Routes are + processed in the order they are specified. Specify routes that are more + specific before routes that can handle general cases. :type routes: list[~azure.servicefabric.models.HttpRouteConfig] """ @@ -11813,16 +11020,13 @@ class HttpHostConfig(msrest.serialization.Model): 'routes': {'key': 'routes', 'type': '[HttpRouteConfig]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HttpHostConfig, self).__init__(**kwargs) - self.name = kwargs['name'] - self.routes = kwargs['routes'] + self.name = kwargs.get('name', None) + self.routes = kwargs.get('routes', None) -class HttpRouteConfig(msrest.serialization.Model): +class HttpRouteConfig(Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. @@ -11831,7 +11035,8 @@ class HttpRouteConfig(msrest.serialization.Model): :type name: str :param match: Required. Describes a rule for http route matching. :type match: ~azure.servicefabric.models.HttpRouteMatchRule - :param destination: Required. Describes destination endpoint for routing traffic. + :param destination: Required. Describes destination endpoint for routing + traffic. 
:type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -11847,17 +11052,14 @@ class HttpRouteConfig(msrest.serialization.Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HttpRouteConfig, self).__init__(**kwargs) - self.name = kwargs['name'] - self.match = kwargs['match'] - self.destination = kwargs['destination'] + self.name = kwargs.get('name', None) + self.match = kwargs.get('match', None) + self.destination = kwargs.get('destination', None) -class HttpRouteMatchHeader(msrest.serialization.Model): +class HttpRouteMatchHeader(Model): """Describes header information for http route matching. All required parameters must be populated in order to send to Azure. @@ -11866,7 +11068,7 @@ class HttpRouteMatchHeader(msrest.serialization.Model): :type name: str :param value: Value of header to match in request. :type value: str - :param type: how to match header value. Possible values include: "exact". + :param type: how to match header value. Possible values include: 'exact' :type type: str or ~azure.servicefabric.models.HeaderMatchType """ @@ -11880,32 +11082,33 @@ class HttpRouteMatchHeader(msrest.serialization.Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HttpRouteMatchHeader, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) self.type = kwargs.get('type', None) -class HttpRouteMatchPath(msrest.serialization.Model): +class HttpRouteMatchPath(Model): """Path to match for routing. + Variables are only populated by the server, and will be ignored when + sending a request. + All required parameters must be populated in order to send to Azure. :param value: Required. Uri path to match for request. :type value: str :param rewrite: replacement string for matched part of the Uri. 
:type rewrite: str - :param type: Required. how to match value in the Uri. Possible values include: "prefix". - :type type: str or ~azure.servicefabric.models.PathMatchType + :ivar type: Required. how to match value in the Uri. Default value: + "prefix" . + :vartype type: str """ _validation = { 'value': {'required': True}, - 'type': {'required': True}, + 'type': {'required': True, 'constant': True}, } _attribute_map = { @@ -11914,17 +11117,15 @@ class HttpRouteMatchPath(msrest.serialization.Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + type = "prefix" + + def __init__(self, **kwargs): super(HttpRouteMatchPath, self).__init__(**kwargs) - self.value = kwargs['value'] + self.value = kwargs.get('value', None) self.rewrite = kwargs.get('rewrite', None) - self.type = kwargs['type'] -class HttpRouteMatchRule(msrest.serialization.Model): +class HttpRouteMatchRule(Model): """Describes a rule for http route matching. All required parameters must be populated in order to send to Azure. @@ -11944,32 +11145,32 @@ class HttpRouteMatchRule(msrest.serialization.Model): 'headers': {'key': 'headers', 'type': '[HttpRouteMatchHeader]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(HttpRouteMatchRule, self).__init__(**kwargs) - self.path = kwargs['path'] + self.path = kwargs.get('path', None) self.headers = kwargs.get('headers', None) -class IdentityDescription(msrest.serialization.Model): +class IdentityDescription(Model): """Information describing the identities associated with this application. All required parameters must be populated in order to send to Azure. - :param token_service_endpoint: the endpoint for the token service managing this identity. + :param token_service_endpoint: the endpoint for the token service managing + this identity :type token_service_endpoint: str - :param type: Required. 
the types of identities associated with this resource; currently - restricted to 'SystemAssigned and UserAssigned'. + :param type: Required. the types of identities associated with this + resource; currently restricted to 'SystemAssigned and UserAssigned' :type type: str - :param tenant_id: the identifier of the tenant containing the application's identity. + :param tenant_id: the identifier of the tenant containing the + application's identity. :type tenant_id: str - :param principal_id: the object identifier of the Service Principal of the identity associated - with this resource. + :param principal_id: the object identifier of the Service Principal of the + identity associated with this resource. :type principal_id: str :param user_assigned_identities: represents user assigned identities map. - :type user_assigned_identities: dict[str, ~azure.servicefabric.models.IdentityItemDescription] + :type user_assigned_identities: dict[str, + ~azure.servicefabric.models.IdentityItemDescription] """ _validation = { @@ -11984,26 +11185,23 @@ class IdentityDescription(msrest.serialization.Model): 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{IdentityItemDescription}'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(IdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = kwargs.get('token_service_endpoint', None) - self.type = kwargs['type'] + self.type = kwargs.get('type', None) self.tenant_id = kwargs.get('tenant_id', None) self.principal_id = kwargs.get('principal_id', None) self.user_assigned_identities = kwargs.get('user_assigned_identities', None) -class IdentityItemDescription(msrest.serialization.Model): +class IdentityItemDescription(Model): """Describes a single user-assigned identity associated with the application. - :param principal_id: the object identifier of the Service Principal which this identity - represents. 
+ :param principal_id: the object identifier of the Service Principal which + this identity represents. :type principal_id: str - :param client_id: the client identifier of the Service Principal which this identity - represents. + :param client_id: the client identifier of the Service Principal which + this identity represents. :type client_id: str """ @@ -12012,32 +11210,30 @@ class IdentityItemDescription(msrest.serialization.Model): 'client_id': {'key': 'clientId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(IdentityItemDescription, self).__init__(**kwargs) self.principal_id = kwargs.get('principal_id', None) self.client_id = kwargs.get('client_id', None) -class ImageRegistryCredential(msrest.serialization.Model): +class ImageRegistryCredential(Model): """Image registry credential. All required parameters must be populated in order to send to Azure. - :param server: Required. Docker image registry server, without protocol such as ``http`` and - ``https``. + :param server: Required. Docker image registry server, without protocol + such as `http` and `https`. :type server: str :param username: Required. The username for the private registry. :type username: str - :param password_type: The type of the image registry password being given in password. Possible - values include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: - "ClearText". - :type password_type: str or ~azure.servicefabric.models.ImageRegistryPasswordType - :param password: The password for the private registry. The password is required for create or - update operations, however it is not returned in the get or list operations. Will be processed - based on the type provided. + :param password_type: The type of the image registry password being given + in password. Possible values include: 'ClearText', 'KeyVaultReference', + 'SecretValueReference'. Default value: "ClearText" . 
+ :type password_type: str or + ~azure.servicefabric.models.ImageRegistryPasswordType + :param password: The password for the private registry. The password is + required for create or update operations, however it is not returned in + the get or list operations. Will be processed based on the type provided. :type password: str """ @@ -12053,25 +11249,22 @@ class ImageRegistryCredential(msrest.serialization.Model): 'password': {'key': 'password', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ImageRegistryCredential, self).__init__(**kwargs) - self.server = kwargs['server'] - self.username = kwargs['username'] + self.server = kwargs.get('server', None) + self.username = kwargs.get('username', None) self.password_type = kwargs.get('password_type', "ClearText") self.password = kwargs.get('password', None) -class ImageStoreContent(msrest.serialization.Model): +class ImageStoreContent(Model): """Information about the image store content. - :param store_files: The list of image store file info objects represents files found under the - given image store relative path. + :param store_files: The list of image store file info objects represents + files found under the given image store relative path. :type store_files: list[~azure.servicefabric.models.FileInfo] - :param store_folders: The list of image store folder info objects represents subfolders found - under the given image store relative path. + :param store_folders: The list of image store folder info objects + represents subfolders found under the given image store relative path. 
:type store_folders: list[~azure.servicefabric.models.FolderInfo] """ @@ -12080,32 +11273,31 @@ class ImageStoreContent(msrest.serialization.Model): 'store_folders': {'key': 'StoreFolders', 'type': '[FolderInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ImageStoreContent, self).__init__(**kwargs) self.store_files = kwargs.get('store_files', None) self.store_folders = kwargs.get('store_folders', None) -class ImageStoreCopyDescription(msrest.serialization.Model): - """Information about how to copy image store content from one image store relative path to another image store relative path. +class ImageStoreCopyDescription(Model): + """Information about how to copy image store content from one image store + relative path to another image store relative path. All required parameters must be populated in order to send to Azure. - :param remote_source: Required. The relative path of source image store content to be copied - from. + :param remote_source: Required. The relative path of source image store + content to be copied from. :type remote_source: str - :param remote_destination: Required. The relative path of destination image store content to be - copied to. + :param remote_destination: Required. The relative path of destination + image store content to be copied to. :type remote_destination: str :param skip_files: The list of the file names to be skipped for copying. :type skip_files: list[str] - :param check_mark_file: Indicates whether to check mark file during copying. The property is - true if checking mark file is required, false otherwise. The mark file is used to check whether - the folder is well constructed. If the property is true and mark file does not exist, the copy - is skipped. + :param check_mark_file: Indicates whether to check mark file during + copying. The property is true if checking mark file is required, false + otherwise. The mark file is used to check whether the folder is well + constructed. 
If the property is true and mark file does not exist, the + copy is skipped. :type check_mark_file: bool """ @@ -12121,38 +11313,35 @@ class ImageStoreCopyDescription(msrest.serialization.Model): 'check_mark_file': {'key': 'CheckMarkFile', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ImageStoreCopyDescription, self).__init__(**kwargs) - self.remote_source = kwargs['remote_source'] - self.remote_destination = kwargs['remote_destination'] + self.remote_source = kwargs.get('remote_source', None) + self.remote_destination = kwargs.get('remote_destination', None) self.skip_files = kwargs.get('skip_files', None) self.check_mark_file = kwargs.get('check_mark_file', None) -class ImageStoreInfo(msrest.serialization.Model): +class ImageStoreInfo(Model): """Information about the ImageStore's resource usage. - :param disk_info: disk capacity and available disk space on the node where the ImageStore - primary is placed. + :param disk_info: disk capacity and available disk space on the node where + the ImageStore primary is placed. :type disk_info: ~azure.servicefabric.models.DiskInfo :param used_by_metadata: the ImageStore's file system usage for metadata. :type used_by_metadata: ~azure.servicefabric.models.UsageInfo - :param used_by_staging: The ImageStore's file system usage for staging files that are being - uploaded. + :param used_by_staging: The ImageStore's file system usage for staging + files that are being uploaded. :type used_by_staging: ~azure.servicefabric.models.UsageInfo - :param used_by_copy: the ImageStore's file system usage for copied application and cluster - packages. `Removing application and cluster packages - `_ will - free up this space. + :param used_by_copy: the ImageStore's file system usage for copied + application and cluster packages. [Removing application and cluster + packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-deleteimagestorecontent) + will free up this space. 
:type used_by_copy: ~azure.servicefabric.models.UsageInfo - :param used_by_register: the ImageStore's file system usage for registered and cluster - packages. `Unregistering application - `_ - and `cluster packages - `_ + :param used_by_register: the ImageStore's file system usage for registered + and cluster packages. [Unregistering + application](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) + and [cluster + packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) will free up this space. :type used_by_register: ~azure.servicefabric.models.UsageInfo """ @@ -12165,10 +11354,7 @@ class ImageStoreInfo(msrest.serialization.Model): 'used_by_register': {'key': 'UsedByRegister', 'type': 'UsageInfo'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ImageStoreInfo, self).__init__(**kwargs) self.disk_info = kwargs.get('disk_info', None) self.used_by_metadata = kwargs.get('used_by_metadata', None) @@ -12177,17 +11363,17 @@ def __init__( self.used_by_register = kwargs.get('used_by_register', None) -class SecretResourcePropertiesBase(msrest.serialization.Model): - """This type describes the properties of a secret resource, including its kind. +class SecretResourcePropertiesBase(Model): + """This type describes the properties of a secret resource, including its + kind. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecretResourceProperties. + sub-classes are: SecretResourceProperties All required parameters must be populated in order to send to Azure. - :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values - include: "inlinedValue", "keyVaultVersionedReference". - :type kind: str or ~azure.servicefabric.models.SecretKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -12202,36 +11388,35 @@ class SecretResourcePropertiesBase(msrest.serialization.Model): 'kind': {'SecretResourceProperties': 'SecretResourceProperties'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecretResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class SecretResourceProperties(SecretResourcePropertiesBase): """Describes the properties of a secret resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: InlinedValueSecretResourceProperties. + sub-classes are: InlinedValueSecretResourceProperties - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values - include: "inlinedValue", "keyVaultVersionedReference". - :type kind: str or ~azure.servicefabric.models.SecretKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the resource. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the secret. + :ivar status_details: Gives additional information about the current + status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. The value of this - property is opaque to Service Fabric. 
Once set, the value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. + The value of this property is opaque to Service Fabric. Once set, the + value of this property cannot be changed. :type content_type: str """ @@ -12253,37 +11438,40 @@ class SecretResourceProperties(SecretResourcePropertiesBase): 'kind': {'inlinedValue': 'InlinedValueSecretResourceProperties'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecretResourceProperties, self).__init__(**kwargs) - self.kind = 'SecretResourceProperties' # type: str self.description = kwargs.get('description', None) self.status = None self.status_details = None self.content_type = kwargs.get('content_type', None) + self.kind = 'SecretResourceProperties' class InlinedValueSecretResourceProperties(SecretResourceProperties): - """Describes the properties of a secret resource whose value is provided explicitly as plaintext. The secret resource may have multiple values, each being uniquely versioned. The secret value of each version is stored encrypted, and delivered as plaintext into the context of applications referencing it. + """Describes the properties of a secret resource whose value is provided + explicitly as plaintext. The secret resource may have multiple values, each + being uniquely versioned. The secret value of each version is stored + encrypted, and delivered as plaintext into the context of applications + referencing it. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values - include: "inlinedValue", "keyVaultVersionedReference". - :type kind: str or ~azure.servicefabric.models.SecretKind + :param kind: Required. 
Constant filled by server. + :type kind: str :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the resource. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the secret. + :ivar status_details: Gives additional information about the current + status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. The value of this - property is opaque to Service Fabric. Once set, the value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. + The value of this property is opaque to Service Fabric. Once set, the + value of this property cannot be changed. :type content_type: str """ @@ -12301,19 +11489,16 @@ class InlinedValueSecretResourceProperties(SecretResourceProperties): 'content_type': {'key': 'contentType', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(InlinedValueSecretResourceProperties, self).__init__(**kwargs) - self.kind = 'inlinedValue' # type: str + self.kind = 'inlinedValue' -class InstanceLifecycleDescription(msrest.serialization.Model): +class InstanceLifecycleDescription(Model): """Describes how the instance will behave. - :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original - location after upgrade. + :param restore_replica_location_after_upgrade: If set to true, move/swap + replica to original location after upgrade. 
:type restore_replica_location_after_upgrade: bool """ @@ -12321,10 +11506,7 @@ class InstanceLifecycleDescription(msrest.serialization.Model): 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(InstanceLifecycleDescription, self).__init__(**kwargs) self.restore_replica_location_after_upgrade = kwargs.get('restore_replica_location_after_upgrade', None) @@ -12334,10 +11516,8 @@ class Int64PropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. + :type kind: str :param data: Required. The data of the property value. :type data: str """ @@ -12352,32 +11532,30 @@ class Int64PropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(Int64PropertyValue, self).__init__(**kwargs) - self.kind = 'Int64' # type: str - self.data = kwargs['data'] + self.data = kwargs.get('data', None) + self.kind = 'Int64' -class PartitionInformation(msrest.serialization.Model): - """Information about the partition identity, partitioning scheme and keys supported by it. +class PartitionInformation(Model): + """Information about the partition identity, partitioning scheme and keys + supported by it. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, SingletonPartitionInformation. 
+ sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, + SingletonPartitionInformation All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. 
+ :type service_partition_kind: str """ _validation = { @@ -12385,40 +11563,38 @@ class PartitionInformation(msrest.serialization.Model): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, } _subtype_map = { 'service_partition_kind': {'Int64Range': 'Int64RangePartitionInformation', 'Named': 'NamedPartitionInformation', 'Singleton': 'SingletonPartitionInformation'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionInformation, self).__init__(**kwargs) - self.service_partition_kind = None # type: Optional[str] self.id = kwargs.get('id', None) + self.service_partition_kind = None class Int64RangePartitionInformation(PartitionInformation): - """Describes the partition information for the integer range that is based on partition schemes. + """Describes the partition information for the integer range that is based on + partition schemes. All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. 
If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str :param low_key: Specifies the minimum key value handled by this partition. :type low_key: str - :param high_key: Specifies the maximum key value handled by this partition. + :param high_key: Specifies the maximum key value handled by this + partition. :type high_key: str """ @@ -12427,30 +11603,28 @@ class Int64RangePartitionInformation(PartitionInformation): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'low_key': {'key': 'LowKey', 'type': 'str'}, 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(Int64RangePartitionInformation, self).__init__(**kwargs) - self.service_partition_kind = 'Int64Range' # type: str self.low_key = kwargs.get('low_key', None) self.high_key = kwargs.get('high_key', None) + self.service_partition_kind = 'Int64Range' -class InvokeDataLossResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class InvokeDataLossResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the partition that the - user-induced operation acted upon. 
+ :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. :type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -12459,23 +11633,21 @@ class InvokeDataLossResult(msrest.serialization.Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(InvokeDataLossResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.selected_partition = kwargs.get('selected_partition', None) -class InvokeQuorumLossResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class InvokeQuorumLossResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the partition that the - user-induced operation acted upon. + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -12484,26 +11656,22 @@ class InvokeQuorumLossResult(msrest.serialization.Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(InvokeQuorumLossResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.selected_partition = kwargs.get('selected_partition', None) -class ReplicaStatusBase(msrest.serialization.Model): +class ReplicaStatusBase(Model): """Information about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: KeyValueStoreReplicaStatus. + sub-classes are: KeyValueStoreReplicaStatus All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Invalid", "KeyValueStore". - :type kind: str or ~azure.servicefabric.models.ReplicaKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -12518,12 +11686,9 @@ class ReplicaStatusBase(msrest.serialization.Model): 'kind': {'KeyValueStore': 'KeyValueStoreReplicaStatus'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaStatusBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class KeyValueStoreReplicaStatus(ReplicaStatusBase): @@ -12531,22 +11696,24 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Invalid", "KeyValueStore". - :type kind: str or ~azure.servicefabric.models.ReplicaKind - :param database_row_count_estimate: Value indicating the estimated number of rows in the - underlying database. 
+ :param kind: Required. Constant filled by server. + :type kind: str + :param database_row_count_estimate: Value indicating the estimated number + of rows in the underlying database. :type database_row_count_estimate: str - :param database_logical_size_estimate: Value indicating the estimated size of the underlying - database. + :param database_logical_size_estimate: Value indicating the estimated size + of the underlying database. :type database_logical_size_estimate: str - :param copy_notification_current_key_filter: Value indicating the latest key-prefix filter - applied to enumeration during the callback. Null if there is no pending callback. + :param copy_notification_current_key_filter: Value indicating the latest + key-prefix filter applied to enumeration during the callback. Null if + there is no pending callback. :type copy_notification_current_key_filter: str - :param copy_notification_current_progress: Value indicating the latest number of keys - enumerated during the callback. 0 if there is no pending callback. + :param copy_notification_current_progress: Value indicating the latest + number of keys enumerated during the callback. 0 if there is no pending + callback. :type copy_notification_current_progress: str - :param status_details: Value indicating the current status details of the replica. + :param status_details: Value indicating the current status details of the + replica. 
:type status_details: str """ @@ -12563,39 +11730,41 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(KeyValueStoreReplicaStatus, self).__init__(**kwargs) - self.kind = 'KeyValueStore' # type: str self.database_row_count_estimate = kwargs.get('database_row_count_estimate', None) self.database_logical_size_estimate = kwargs.get('database_logical_size_estimate', None) self.copy_notification_current_key_filter = kwargs.get('copy_notification_current_key_filter', None) self.copy_notification_current_progress = kwargs.get('copy_notification_current_progress', None) self.status_details = kwargs.get('status_details', None) + self.kind = 'KeyValueStore' -class LoadedPartitionInformationQueryDescription(msrest.serialization.Model): +class LoadedPartitionInformationQueryDescription(Model): """Represents data structure that contains query information. - :param metric_name: Name of the metric for which this information is provided. + :param metric_name: Name of the metric for which this information is + provided. :type metric_name: str :param service_name: Name of the service this partition belongs to. :type service_name: str - :param ordering: Ordering of partitions' load. Possible values include: "Desc", "Asc". + :param ordering: Ordering of partitions' load. Possible values include: + 'Desc', 'Asc'. Default value: "Desc" . :type ordering: str or ~azure.servicefabric.models.Ordering - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. 
If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. + :param max_results: The maximum number of results to be returned as part + of the paged queries. This parameter defines the upper bound on the number + of results returned. The results returned can be less than the specified + maximum results if they do not fit in the message as per the max message + size restrictions defined in the configuration. If this parameter is zero + or not specified, the paged query includes as many results as possible + that fit in the return message. :type max_results: long - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. 
:type continuation_token: str """ @@ -12607,28 +11776,27 @@ class LoadedPartitionInformationQueryDescription(msrest.serialization.Model): 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LoadedPartitionInformationQueryDescription, self).__init__(**kwargs) self.metric_name = kwargs.get('metric_name', None) self.service_name = kwargs.get('service_name', None) - self.ordering = kwargs.get('ordering', None) + self.ordering = kwargs.get('ordering', "Desc") self.max_results = kwargs.get('max_results', None) self.continuation_token = kwargs.get('continuation_token', None) -class LoadedPartitionInformationResult(msrest.serialization.Model): +class LoadedPartitionInformationResult(Model): """Represents partition information. All required parameters must be populated in order to send to Azure. - :param service_name: Required. Name of the service this partition belongs to. + :param service_name: Required. Name of the service this partition belongs + to. :type service_name: str :param partition_id: Required. Id of the partition. :type partition_id: str - :param metric_name: Required. Name of the metric for which this information is provided. + :param metric_name: Required. Name of the metric for which this + information is provided. :type metric_name: str :param load: Required. Load for metric. 
:type load: long @@ -12648,28 +11816,28 @@ class LoadedPartitionInformationResult(msrest.serialization.Model): 'load': {'key': 'Load', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LoadedPartitionInformationResult, self).__init__(**kwargs) - self.service_name = kwargs['service_name'] - self.partition_id = kwargs['partition_id'] - self.metric_name = kwargs['metric_name'] - self.load = kwargs['load'] + self.service_name = kwargs.get('service_name', None) + self.partition_id = kwargs.get('partition_id', None) + self.metric_name = kwargs.get('metric_name', None) + self.load = kwargs.get('load', None) -class LoadedPartitionInformationResultList(msrest.serialization.Model): - """Represents data structure that contains top/least loaded partitions for a certain metric. +class LoadedPartitionInformationResultList(Model): + """Represents data structure that contains top/least loaded partitions for a + certain metric. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of application information. 
- :type items: list[~azure.servicefabric.models.LoadedPartitionInformationResult] + :type items: + list[~azure.servicefabric.models.LoadedPartitionInformationResult] """ _attribute_map = { @@ -12677,85 +11845,91 @@ class LoadedPartitionInformationResultList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[LoadedPartitionInformationResult]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LoadedPartitionInformationResultList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class LoadMetricInformation(msrest.serialization.Model): - """Represents data structure that contains load information for a certain metric in a cluster. +class LoadMetricInformation(Model): + """Represents data structure that contains load information for a certain + metric in a cluster. - :param name: Name of the metric for which this load information is provided. + :param name: Name of the metric for which this load information is + provided. :type name: str - :param is_balanced_before: Value that indicates whether the metrics is balanced or not before - resource balancer run. + :param is_balanced_before: Value that indicates whether the metrics is + balanced or not before resource balancer run :type is_balanced_before: bool - :param is_balanced_after: Value that indicates whether the metrics is balanced or not after - resource balancer run. + :param is_balanced_after: Value that indicates whether the metrics is + balanced or not after resource balancer run. :type is_balanced_after: bool - :param deviation_before: The standard average deviation of the metrics before resource balancer - run. + :param deviation_before: The standard average deviation of the metrics + before resource balancer run. :type deviation_before: str - :param deviation_after: The standard average deviation of the metrics after resource balancer - run. 
+ :param deviation_after: The standard average deviation of the metrics + after resource balancer run. :type deviation_after: str :param balancing_threshold: The balancing threshold for a certain metric. :type balancing_threshold: str - :param action: The current action being taken with regard to this metric. + :param action: The current action being taken with regard to this metric :type action: str - :param activity_threshold: The Activity Threshold specified for this metric in the system - Cluster Manifest. + :param activity_threshold: The Activity Threshold specified for this + metric in the system Cluster Manifest. :type activity_threshold: str - :param cluster_capacity: The total cluster capacity for a given metric. + :param cluster_capacity: The total cluster capacity for a given metric :type cluster_capacity: str - :param cluster_load: The total cluster load. In future releases of Service Fabric this - parameter will be deprecated in favor of CurrentClusterLoad. + :param cluster_load: The total cluster load. In future releases of Service + Fabric this parameter will be deprecated in favor of CurrentClusterLoad. :type cluster_load: str :param current_cluster_load: The total cluster load. :type current_cluster_load: str - :param cluster_remaining_capacity: The remaining capacity for the metric in the cluster. In - future releases of Service Fabric this parameter will be deprecated in favor of - ClusterCapacityRemaining. + :param cluster_remaining_capacity: The remaining capacity for the metric + in the cluster. In future releases of Service Fabric this parameter will + be deprecated in favor of ClusterCapacityRemaining. :type cluster_remaining_capacity: str - :param cluster_capacity_remaining: The remaining capacity for the metric in the cluster. + :param cluster_capacity_remaining: The remaining capacity for the metric + in the cluster. 
:type cluster_capacity_remaining: str - :param is_cluster_capacity_violation: Indicates that the metric is currently over capacity in - the cluster. + :param is_cluster_capacity_violation: Indicates that the metric is + currently over capacity in the cluster. :type is_cluster_capacity_violation: bool - :param node_buffer_percentage: The reserved percentage of total node capacity for this metric. + :param node_buffer_percentage: The reserved percentage of total node + capacity for this metric. :type node_buffer_percentage: str - :param cluster_buffered_capacity: Remaining capacity in the cluster excluding the reserved - space. In future releases of Service Fabric this parameter will be deprecated in favor of - BufferedClusterCapacityRemaining. + :param cluster_buffered_capacity: Remaining capacity in the cluster + excluding the reserved space. In future releases of Service Fabric this + parameter will be deprecated in favor of BufferedClusterCapacityRemaining. :type cluster_buffered_capacity: str - :param buffered_cluster_capacity_remaining: Remaining capacity in the cluster excluding the - reserved space. + :param buffered_cluster_capacity_remaining: Remaining capacity in the + cluster excluding the reserved space. :type buffered_cluster_capacity_remaining: str - :param cluster_remaining_buffered_capacity: The remaining percentage of cluster total capacity - for this metric. + :param cluster_remaining_buffered_capacity: The remaining percentage of + cluster total capacity for this metric. :type cluster_remaining_buffered_capacity: str - :param min_node_load_value: The minimum load on any node for this metric. In future releases of - Service Fabric this parameter will be deprecated in favor of MinimumNodeLoad. + :param min_node_load_value: The minimum load on any node for this metric. + In future releases of Service Fabric this parameter will be deprecated in + favor of MinimumNodeLoad. 
:type min_node_load_value: str :param minimum_node_load: The minimum load on any node for this metric. :type minimum_node_load: str - :param min_node_load_node_id: The node id of the node with the minimum load for this metric. + :param min_node_load_node_id: The node id of the node with the minimum + load for this metric. :type min_node_load_node_id: ~azure.servicefabric.models.NodeId - :param max_node_load_value: The maximum load on any node for this metric. In future releases of - Service Fabric this parameter will be deprecated in favor of MaximumNodeLoad. + :param max_node_load_value: The maximum load on any node for this metric. + In future releases of Service Fabric this parameter will be deprecated in + favor of MaximumNodeLoad. :type max_node_load_value: str :param maximum_node_load: The maximum load on any node for this metric. :type maximum_node_load: str - :param max_node_load_node_id: The node id of the node with the maximum load for this metric. + :param max_node_load_node_id: The node id of the node with the maximum + load for this metric. :type max_node_load_node_id: ~azure.servicefabric.models.NodeId - :param planned_load_removal: This value represents the load of the replicas that are planned to - be removed in the future within the cluster. - This kind of load is reported for replicas that are currently being moving to other nodes and - for replicas that are currently being dropped but still use the load on the source node. + :param planned_load_removal: This value represents the load of the + replicas that are planned to be removed in the future within the cluster. + This kind of load is reported for replicas that are currently being moving + to other nodes and for replicas that are currently being dropped but still + use the load on the source node. 
:type planned_load_removal: str """ @@ -12787,10 +11961,7 @@ class LoadMetricInformation(msrest.serialization.Model): 'planned_load_removal': {'key': 'PlannedLoadRemoval', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LoadMetricInformation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.is_balanced_before = kwargs.get('is_balanced_before', None) @@ -12819,15 +11990,16 @@ def __init__( self.planned_load_removal = kwargs.get('planned_load_removal', None) -class LoadMetricReport(msrest.serialization.Model): - """Represents the load metric report which contains the time metric was reported, its name and value. +class LoadMetricReport(Model): + """Represents the load metric report which contains the time metric was + reported, its name and value. :param last_reported_utc: Gets the UTC time when the load was reported. - :type last_reported_utc: ~datetime.datetime + :type last_reported_utc: datetime :param name: The name of the load metric. :type name: str - :param value: The value of the load metric. In future releases of Service Fabric this parameter - will be deprecated in favor of CurrentValue. + :param value: The value of the load metric. In future releases of Service + Fabric this parameter will be deprecated in favor of CurrentValue. :type value: str :param current_value: The value of the load metric. 
:type current_value: str @@ -12840,10 +12012,7 @@ class LoadMetricReport(msrest.serialization.Model): 'current_value': {'key': 'CurrentValue', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LoadMetricReport, self).__init__(**kwargs) self.last_reported_utc = kwargs.get('last_reported_utc', None) self.name = kwargs.get('name', None) @@ -12851,18 +12020,18 @@ def __init__( self.current_value = kwargs.get('current_value', None) -class LoadMetricReportInfo(msrest.serialization.Model): +class LoadMetricReportInfo(Model): """Information about load reported by replica. :param name: The name of the metric. :type name: str - :param value: The value of the load for the metric. In future releases of Service Fabric this - parameter will be deprecated in favor of CurrentValue. + :param value: The value of the load for the metric. In future releases of + Service Fabric this parameter will be deprecated in favor of CurrentValue. :type value: int :param current_value: The double value of the load for the metric. :type current_value: str :param last_reported_utc: The UTC time when the load is reported. - :type last_reported_utc: ~datetime.datetime + :type last_reported_utc: datetime """ _attribute_map = { @@ -12872,10 +12041,7 @@ class LoadMetricReportInfo(msrest.serialization.Model): 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LoadMetricReportInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) @@ -12883,17 +12049,17 @@ def __init__( self.last_reported_utc = kwargs.get('last_reported_utc', None) -class NetworkResourcePropertiesBase(msrest.serialization.Model): - """This type describes the properties of a network resource, including its kind. +class NetworkResourcePropertiesBase(Model): + """This type describes the properties of a network resource, including its + kind. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NetworkResourceProperties. + sub-classes are: NetworkResourceProperties All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of a Service Fabric container network.Constant filled by - server. Possible values include: "Local". - :type kind: str or ~azure.servicefabric.models.NetworkKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -12908,33 +12074,31 @@ class NetworkResourcePropertiesBase(msrest.serialization.Model): 'kind': {'NetworkResourceProperties': 'NetworkResourceProperties'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NetworkResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class NetworkResourceProperties(NetworkResourcePropertiesBase): """Describes properties of a network resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LocalNetworkResourceProperties. + sub-classes are: LocalNetworkResourceProperties - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of a Service Fabric container network.Constant filled by - server. Possible values include: "Local". - :type kind: str or ~azure.servicefabric.models.NetworkKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the network. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the network. + :ivar status_details: Gives additional information about the current + status of the network. :vartype status_details: str """ @@ -12955,35 +12119,35 @@ class NetworkResourceProperties(NetworkResourcePropertiesBase): 'kind': {'Local': 'LocalNetworkResourceProperties'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NetworkResourceProperties, self).__init__(**kwargs) - self.kind = 'NetworkResourceProperties' # type: str self.description = kwargs.get('description', None) self.status = None self.status_details = None + self.kind = 'NetworkResourceProperties' class LocalNetworkResourceProperties(NetworkResourceProperties): - """Information about a Service Fabric container network local to a single Service Fabric cluster. + """Information about a Service Fabric container network local to a single + Service Fabric cluster. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of a Service Fabric container network.Constant filled by - server. Possible values include: "Local". - :type kind: str or ~azure.servicefabric.models.NetworkKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the network. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the network. + :ivar status_details: Gives additional information about the current + status of the network. :vartype status_details: str - :param network_address_prefix: Address space for the local container network. + :param network_address_prefix: Address space for the local container + network. :type network_address_prefix: str """ @@ -13001,16 +12165,13 @@ class LocalNetworkResourceProperties(NetworkResourceProperties): 'network_address_prefix': {'key': 'networkAddressPrefix', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(LocalNetworkResourceProperties, self).__init__(**kwargs) - self.kind = 'Local' # type: str self.network_address_prefix = kwargs.get('network_address_prefix', None) + self.kind = 'Local' -class ManagedApplicationIdentity(msrest.serialization.Model): +class ManagedApplicationIdentity(Model): """Describes a managed application identity. All required parameters must be populated in order to send to Azure. @@ -13030,22 +12191,20 @@ class ManagedApplicationIdentity(msrest.serialization.Model): 'principal_id': {'key': 'PrincipalId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ManagedApplicationIdentity, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.principal_id = kwargs.get('principal_id', None) -class ManagedApplicationIdentityDescription(msrest.serialization.Model): +class ManagedApplicationIdentityDescription(Model): """Managed application identity description. :param token_service_endpoint: Token service endpoint. :type token_service_endpoint: str :param managed_identities: A list of managed application identity objects. 
- :type managed_identities: list[~azure.servicefabric.models.ManagedApplicationIdentity] + :type managed_identities: + list[~azure.servicefabric.models.ManagedApplicationIdentity] """ _attribute_map = { @@ -13053,33 +12212,32 @@ class ManagedApplicationIdentityDescription(msrest.serialization.Model): 'managed_identities': {'key': 'ManagedIdentities', 'type': '[ManagedApplicationIdentity]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ManagedApplicationIdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = kwargs.get('token_service_endpoint', None) self.managed_identities = kwargs.get('managed_identities', None) class ManagedIdentityAzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Azure blob store (connected using managed identity) used for storing and enumerating backups. + """Describes the parameters for Azure blob store (connected using managed + identity) used for storing and enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param managed_identity_type: Required. The type of managed identity to be used to connect to - Azure Blob Store via Managed Identity. Possible values include: "Invalid", "VMSS", "Cluster". - :type managed_identity_type: str or ~azure.servicefabric.models.ManagedIdentityType - :param blob_service_uri: Required. The Blob Service Uri to connect to the Azure blob store.. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param managed_identity_type: Required. 
The type of managed identity to be + used to connect to Azure Blob Store via Managed Identity. Possible values + include: 'Invalid', 'VMSS', 'Cluster' + :type managed_identity_type: str or + ~azure.servicefabric.models.ManagedIdentityType + :param blob_service_uri: Required. The Blob Service Uri to connect to the + Azure blob store.. :type blob_service_uri: str - :param container_name: Required. The name of the container in the blob store to store and - enumerate backups from. + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. :type container_name: str """ @@ -13091,25 +12249,22 @@ class ManagedIdentityAzureBlobBackupStorageDescription(BackupStorageDescription) } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'managed_identity_type': {'key': 'ManagedIdentityType', 'type': 'str'}, 'blob_service_uri': {'key': 'BlobServiceUri', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ManagedIdentityAzureBlobBackupStorageDescription, self).__init__(**kwargs) - self.storage_kind = 'ManagedIdentityAzureBlobStore' # type: str - self.managed_identity_type = kwargs['managed_identity_type'] - self.blob_service_uri = kwargs['blob_service_uri'] - self.container_name = kwargs['container_name'] + self.managed_identity_type = kwargs.get('managed_identity_type', None) + self.blob_service_uri = kwargs.get('blob_service_uri', None) + self.container_name = kwargs.get('container_name', None) + self.storage_kind = 'ManagedIdentityAzureBlobStore' -class MetricLoadDescription(msrest.serialization.Model): +class MetricLoadDescription(Model): """Specifies metric load information. :param metric_name: The name of the reported metric. 
@@ -13126,50 +12281,52 @@ class MetricLoadDescription(msrest.serialization.Model): 'predicted_load': {'key': 'PredictedLoad', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(MetricLoadDescription, self).__init__(**kwargs) self.metric_name = kwargs.get('metric_name', None) self.current_load = kwargs.get('current_load', None) self.predicted_load = kwargs.get('predicted_load', None) -class MonitoringPolicyDescription(msrest.serialization.Model): +class MonitoringPolicyDescription(Model): """Describes the parameters for monitoring an upgrade in Monitored mode. - :param failure_action: The compensating action to perform when a Monitored upgrade encounters - monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will - start rolling back automatically. - Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible - values include: "Invalid", "Rollback", "Manual". + :param failure_action: The compensating action to perform when a Monitored + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing - an upgrade domain before applying health policies. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to + wait after completing an upgrade domain before applying health policies. 
+ It is first interpreted as a string representing an ISO 8601 duration. If + that fails, then it is interpreted as a number representing the total + number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time that the application or - cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time + that the application or cluster must remain healthy before the upgrade + proceeds to the next upgrade domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health - evaluation when the application or cluster is unhealthy before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_retry_timeout_in_milliseconds: The amount of time to + retry health evaluation when the application or cluster is unhealthy + before FailureAction is executed. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete - before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 - duration. 
If that fails, then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall + upgrade has to complete before FailureAction is executed. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, + then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to - complete before FailureAction is executed. It is first interpreted as a string representing an - ISO 8601 duration. If that fails, then it is interpreted as a number representing the total - number of milliseconds. + :param upgrade_domain_timeout_in_milliseconds: The amount of time each + upgrade domain has to complete before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of + milliseconds. 
:type upgrade_domain_timeout_in_milliseconds: str """ @@ -13182,25 +12339,23 @@ class MonitoringPolicyDescription(msrest.serialization.Model): 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(MonitoringPolicyDescription, self).__init__(**kwargs) self.failure_action = kwargs.get('failure_action', None) - self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', "0") - self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', "PT0H2M0S") - self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', "PT0H10M0S") - self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") - self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") + self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', None) + self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', None) + self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', None) + self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', None) + self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', None) -class NameDescription(msrest.serialization.Model): +class NameDescription(Model): """Describes a Service Fabric name. All required parameters must be populated in order to send to Azure. - :param name: Required. The Service Fabric name, including the 'fabric:' URI scheme. + :param name: Required. The Service Fabric name, including the 'fabric:' + URI scheme. 
:type name: str """ @@ -13212,28 +12367,25 @@ class NameDescription(msrest.serialization.Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NameDescription, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) class NamedPartitionInformation(PartitionInformation): - """Describes the partition information for the name as a string that is based on partition schemes. + """Describes the partition information for the name as a string that is based + on partition schemes. All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str :param name: Name of the partition. 
:type name: str """ @@ -13243,31 +12395,29 @@ class NamedPartitionInformation(PartitionInformation): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NamedPartitionInformation, self).__init__(**kwargs) - self.service_partition_kind = 'Named' # type: str self.name = kwargs.get('name', None) + self.service_partition_kind = 'Named' -class PartitionSchemeDescription(msrest.serialization.Model): +class PartitionSchemeDescription(Model): """Describes how the service is partitioned. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NamedPartitionSchemeDescription, SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription. + sub-classes are: NamedPartitionSchemeDescription, + SingletonPartitionSchemeDescription, + UniformInt64RangePartitionSchemeDescription All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. 
+ :type partition_scheme: str """ _validation = { @@ -13282,12 +12432,9 @@ class PartitionSchemeDescription(msrest.serialization.Model): 'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = None # type: Optional[str] + self.partition_scheme = None class NamedPartitionSchemeDescription(PartitionSchemeDescription): @@ -13295,13 +12442,12 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. + :type partition_scheme: str :param count: Required. The number of partitions. :type count: int - :param names: Required. Array of size specified by the ‘Count’ parameter, for the names of the - partitions. + :param names: Required. Array of size specified by the ‘Count’ parameter, + for the names of the partitions. 
:type names: list[str] """ @@ -13317,22 +12463,20 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): 'names': {'key': 'Names', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NamedPartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'Named' # type: str - self.count = kwargs['count'] - self.names = kwargs['names'] + self.count = kwargs.get('count', None) + self.names = kwargs.get('names', None) + self.partition_scheme = 'Named' -class NetworkRef(msrest.serialization.Model): +class NetworkRef(Model): """Describes a network reference in a service. - :param name: Name of the network. + :param name: Name of the network :type name: str - :param endpoint_refs: A list of endpoints that are exposed on this network. + :param endpoint_refs: A list of endpoints that are exposed on this + network. :type endpoint_refs: list[~azure.servicefabric.models.EndpointRef] """ @@ -13341,16 +12485,13 @@ class NetworkRef(msrest.serialization.Model): 'endpoint_refs': {'key': 'endpointRefs', 'type': '[EndpointRef]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NetworkRef, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.endpoint_refs = kwargs.get('endpoint_refs', None) -class NetworkResourceDescription(msrest.serialization.Model): +class NetworkResourceDescription(Model): """This type describes a network resource. All required parameters must be populated in order to send to Azure. 
@@ -13371,13 +12512,10 @@ class NetworkResourceDescription(msrest.serialization.Model): 'properties': {'key': 'properties', 'type': 'NetworkResourceProperties'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NetworkResourceDescription, self).__init__(**kwargs) - self.name = kwargs['name'] - self.properties = kwargs['properties'] + self.name = kwargs.get('name', None) + self.properties = kwargs.get('properties', None) class NodeAbortedEvent(NodeEvent): @@ -13385,38 +12523,18 @@ class NodeAbortedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -13438,9 +12556,9 @@ class NodeAbortedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -13453,11 +12571,11 @@ class NodeAbortedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -13469,20 +12587,17 @@ class NodeAbortedEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeAbortedEvent, self).__init__(**kwargs) - self.kind = 'NodeAborted' # type: str - self.node_instance = kwargs['node_instance'] - self.node_id = kwargs['node_id'] - self.upgrade_domain = kwargs['upgrade_domain'] - self.fault_domain = kwargs['fault_domain'] - self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] - self.hostname = kwargs['hostname'] - self.is_seed_node = kwargs['is_seed_node'] - self.node_version = kwargs['node_version'] + self.node_instance = kwargs.get('node_instance', None) + self.node_id = kwargs.get('node_id', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.hostname = kwargs.get('hostname', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.node_version = kwargs.get('node_version', None) + self.kind = 'NodeAborted' class 
NodeAddedToClusterEvent(NodeEvent): @@ -13490,38 +12605,18 @@ class NodeAddedToClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", 
"ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -13539,9 +12634,9 @@ class NodeAddedToClusterEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -13552,11 +12647,11 @@ class NodeAddedToClusterEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -13566,18 +12661,15 @@ class NodeAddedToClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeAddedToClusterEvent, 
self).__init__(**kwargs) - self.kind = 'NodeAddedToCluster' # type: str - self.node_id = kwargs['node_id'] - self.node_instance = kwargs['node_instance'] - self.node_type = kwargs['node_type'] - self.fabric_version = kwargs['fabric_version'] - self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] - self.node_capacities = kwargs['node_capacities'] + self.node_id = kwargs.get('node_id', None) + self.node_instance = kwargs.get('node_instance', None) + self.node_type = kwargs.get('node_type', None) + self.fabric_version = kwargs.get('fabric_version', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.node_capacities = kwargs.get('node_capacities', None) + self.kind = 'NodeAddedToCluster' class NodeClosedEvent(NodeEvent): @@ -13585,38 +12677,18 @@ class NodeClosedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", 
"ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. 
@@ -13628,9 +12700,9 @@ class NodeClosedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -13638,26 +12710,23 @@ class NodeClosedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'error': {'key': 'Error', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeClosedEvent, self).__init__(**kwargs) - self.kind = 'NodeClosed' # type: str - self.node_id = kwargs['node_id'] - self.node_instance = kwargs['node_instance'] - self.error = kwargs['error'] + self.node_id = kwargs.get('node_id', None) + self.node_instance = kwargs.get('node_instance', None) + self.error = kwargs.get('error', None) + self.kind = 'NodeClosed' class NodeDeactivateCompletedEvent(NodeEvent): @@ -13665,38 +12734,18 @@ class NodeDeactivateCompletedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -13706,13 +12755,13 @@ class NodeDeactivateCompletedEvent(NodeEvent): :param batch_ids_with_deactivate_intent: Required. Batch Ids. :type batch_ids_with_deactivate_intent: str :param start_time: Required. Start time. - :type start_time: ~datetime.datetime + :type start_time: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'effective_deactivate_intent': {'required': True}, @@ -13721,11 +12770,11 @@ class NodeDeactivateCompletedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'effective_deactivate_intent': {'key': 'EffectiveDeactivateIntent', 'type': 'str'}, @@ -13733,16 +12782,13 @@ class NodeDeactivateCompletedEvent(NodeEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__( - self, - 
**kwargs - ): + def __init__(self, **kwargs): super(NodeDeactivateCompletedEvent, self).__init__(**kwargs) - self.kind = 'NodeDeactivateCompleted' # type: str - self.node_instance = kwargs['node_instance'] - self.effective_deactivate_intent = kwargs['effective_deactivate_intent'] - self.batch_ids_with_deactivate_intent = kwargs['batch_ids_with_deactivate_intent'] - self.start_time = kwargs['start_time'] + self.node_instance = kwargs.get('node_instance', None) + self.effective_deactivate_intent = kwargs.get('effective_deactivate_intent', None) + self.batch_ids_with_deactivate_intent = kwargs.get('batch_ids_with_deactivate_intent', None) + self.start_time = kwargs.get('start_time', None) + self.kind = 'NodeDeactivateCompleted' class NodeDeactivateStartedEvent(NodeEvent): @@ -13750,38 +12796,18 @@ class NodeDeactivateStartedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", 
"ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -13793,9 +12819,9 @@ class NodeDeactivateStartedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'batch_id': {'required': True}, @@ -13803,44 +12829,46 @@ class NodeDeactivateStartedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'batch_id': {'key': 'BatchId', 'type': 'str'}, 'deactivate_intent': {'key': 'DeactivateIntent', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeDeactivateStartedEvent, self).__init__(**kwargs) - self.kind = 'NodeDeactivateStarted' # type: str - self.node_instance = kwargs['node_instance'] - self.batch_id = kwargs['batch_id'] - self.deactivate_intent = kwargs['deactivate_intent'] - - -class NodeDeactivationInfo(msrest.serialization.Model): - """Information about the node deactivation. This information is valid for a node that is undergoing deactivation or has already been deactivated. - - :param node_deactivation_intent: The intent or the reason for deactivating the node. Following - are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", - "RemoveData", "RemoveNode". - :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent - :param node_deactivation_status: The status of node deactivation operation. Following are the - possible values. 
Possible values include: "None", "SafetyCheckInProgress", - "SafetyCheckComplete", "Completed". - :type node_deactivation_status: str or ~azure.servicefabric.models.NodeDeactivationStatus - :param node_deactivation_task: List of tasks representing the deactivation operation on the - node. - :type node_deactivation_task: list[~azure.servicefabric.models.NodeDeactivationTask] - :param pending_safety_checks: List of pending safety checks. - :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] + self.node_instance = kwargs.get('node_instance', None) + self.batch_id = kwargs.get('batch_id', None) + self.deactivate_intent = kwargs.get('deactivate_intent', None) + self.kind = 'NodeDeactivateStarted' + + +class NodeDeactivationInfo(Model): + """Information about the node deactivation. This information is valid for a + node that is undergoing deactivation or has already been deactivated. + + :param node_deactivation_intent: The intent or the reason for deactivating + the node. Following are the possible values for it. Possible values + include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' + :type node_deactivation_intent: str or + ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_status: The status of node deactivation + operation. Following are the possible values. Possible values include: + 'None', 'SafetyCheckInProgress', 'SafetyCheckComplete', 'Completed' + :type node_deactivation_status: str or + ~azure.servicefabric.models.NodeDeactivationStatus + :param node_deactivation_task: List of tasks representing the deactivation + operation on the node. 
+ :type node_deactivation_task: + list[~azure.servicefabric.models.NodeDeactivationTask] + :param pending_safety_checks: List of pending safety checks + :type pending_safety_checks: + list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -13850,10 +12878,7 @@ class NodeDeactivationInfo(msrest.serialization.Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeDeactivationInfo, self).__init__(**kwargs) self.node_deactivation_intent = kwargs.get('node_deactivation_intent', None) self.node_deactivation_status = kwargs.get('node_deactivation_status', None) @@ -13861,16 +12886,18 @@ def __init__( self.pending_safety_checks = kwargs.get('pending_safety_checks', None) -class NodeDeactivationTask(msrest.serialization.Model): +class NodeDeactivationTask(Model): """The task representing the deactivation operation on the node. - :param node_deactivation_task_id: Identity of the task related to deactivation operation on the - node. - :type node_deactivation_task_id: ~azure.servicefabric.models.NodeDeactivationTaskId - :param node_deactivation_intent: The intent or the reason for deactivating the node. Following - are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", - "RemoveData", "RemoveNode". - :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_task_id: Identity of the task related to + deactivation operation on the node. + :type node_deactivation_task_id: + ~azure.servicefabric.models.NodeDeactivationTaskId + :param node_deactivation_intent: The intent or the reason for deactivating + the node. Following are the possible values for it. 
Possible values + include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' + :type node_deactivation_intent: str or + ~azure.servicefabric.models.NodeDeactivationIntent """ _attribute_map = { @@ -13878,24 +12905,22 @@ class NodeDeactivationTask(msrest.serialization.Model): 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeDeactivationTask, self).__init__(**kwargs) self.node_deactivation_task_id = kwargs.get('node_deactivation_task_id', None) self.node_deactivation_intent = kwargs.get('node_deactivation_intent', None) -class NodeDeactivationTaskId(msrest.serialization.Model): +class NodeDeactivationTaskId(Model): """Identity of the task related to deactivation operation on the node. :param id: Value of the task id. :type id: str - :param node_deactivation_task_type: The type of the task that performed the node deactivation. - Following are the possible values. Possible values include: "Invalid", "Infrastructure", - "Repair", "Client". - :type node_deactivation_task_type: str or ~azure.servicefabric.models.NodeDeactivationTaskType + :param node_deactivation_task_type: The type of the task that performed + the node deactivation. Following are the possible values. 
Possible values + include: 'Invalid', 'Infrastructure', 'Repair', 'Client' + :type node_deactivation_task_type: str or + ~azure.servicefabric.models.NodeDeactivationTaskType """ _attribute_map = { @@ -13903,10 +12928,7 @@ class NodeDeactivationTaskId(msrest.serialization.Model): 'node_deactivation_task_type': {'key': 'NodeDeactivationTaskType', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeDeactivationTaskId, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.node_deactivation_task_type = kwargs.get('node_deactivation_task_type', None) @@ -13917,95 +12939,75 @@ class NodeDownEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - 
"StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_up_at: Required. Time when Node was last up. 
- :type last_node_up_at: ~datetime.datetime + :type last_node_up_at: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_up_at': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_up_at': {'key': 'LastNodeUpAt', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeDownEvent, self).__init__(**kwargs) - self.kind = 'NodeDown' # type: str - self.node_instance = kwargs['node_instance'] - self.last_node_up_at = kwargs['last_node_up_at'] + self.node_instance = kwargs.get('node_instance', None) + self.last_node_up_at = kwargs.get('last_node_up_at', None) + self.kind = 'NodeDown' class NodeHealth(EntityHealth): """Information about the health of a Service Fabric node. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. 
+ The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the node whose health information is described by this object. + :param name: Name of the node whose health information is described by + this object. :type name: str """ @@ -14017,41 +13019,37 @@ class NodeHealth(EntityHealth): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) class NodeHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a node, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. 
- - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a node, containing information about the + data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the node. The types of the unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the node. The types of the + unhealthy evaluations can be EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -14059,21 +13057,18 @@ class NodeHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Node' # type: str self.node_name = kwargs.get('node_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Node' class NodeHealthReportExpiredEvent(NodeEvent): @@ -14081,38 +13076,18 @@ class NodeHealthReportExpiredEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -14129,16 +13104,17 @@ class NodeHealthReportExpiredEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -14152,11 +13128,11 @@ class NodeHealthReportExpiredEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -14169,34 +13145,34 @@ class NodeHealthReportExpiredEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'NodeHealthReportExpired' # type: str - self.node_instance_id = kwargs['node_instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.node_instance_id = kwargs.get('node_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + 
self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'NodeHealthReportExpired' class NodeHealthState(EntityHealthState): - """Represents the health state of a node, which contains the node identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Represents the health state of a node, which contains the node identifier + and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param name: The name of a Service Fabric node. :type name: str - :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is - deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a + node. Node Id is deterministically generated from node name. 
:type id: ~azure.servicefabric.models.NodeId """ @@ -14206,21 +13182,19 @@ class NodeHealthState(EntityHealthState): 'id': {'key': 'Id', 'type': 'NodeId'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealthState, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.id = kwargs.get('id', None) class NodeHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a node, which contains the node name and its aggregated health state. + """Represents the health state chunk of a node, which contains the node name + and its aggregated health state. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str @@ -14231,22 +13205,21 @@ class NodeHealthStateChunk(EntityHealthStateChunk): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealthStateChunk, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) class NodeHealthStateChunkList(EntityHealthStateChunkList): - """The list of node health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. + """The list of node health state chunks in the cluster that respect the input + filters in the chunk query. Returned by get cluster health state chunks + query. 
- :param total_count: Total number of entity health state objects that match the specified - filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. :type total_count: long - :param items: The list of node health state chunks that respect the input filters in the chunk - query. + :param items: The list of node health state chunks that respect the input + filters in the chunk query. :type items: list[~azure.servicefabric.models.NodeHealthStateChunk] """ @@ -14255,48 +13228,51 @@ class NodeHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[NodeHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class NodeHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a node should be included in the returned cluster health chunk. -One filter can match zero, one or multiple nodes, depending on its properties. -Can be specified in the cluster health chunk query description. - - :param node_name_filter: Name of the node that matches the filter. The filter is applied only - to the specified node, if it exists. - If the node doesn't exist, no node is returned in the cluster health chunk based on this - filter. - If the node exists, it is included in the cluster health chunk if the health state matches the - other filter properties. - If not specified, all nodes that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class NodeHealthStateFilter(Model): + """Defines matching criteria to determine whether a node should be included in + the returned cluster health chunk. 
+ One filter can match zero, one or multiple nodes, depending on its + properties. + Can be specified in the cluster health chunk query description. + + :param node_name_filter: Name of the node that matches the filter. The + filter is applied only to the specified node, if it exists. + If the node doesn't exist, no node is returned in the cluster health chunk + based on this filter. + If the node exists, it is included in the cluster health chunk if the + health state matches the other filter properties. + If not specified, all nodes that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the nodes. It allows selecting - nodes if they match the desired health states. - The possible values are integer value of one of the following health states. Only nodes that - match the filter are returned. All nodes are used to evaluate the cluster aggregated health - state. - If not specified, default value is None, unless the node name is specified. If the filter has - default value and node name is specified, the matching node is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches nodes with HealthState value of OK (2) and - Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. 
- * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the nodes. + It allows selecting nodes if they match the desired health states. + The possible values are integer value of one of the following health + states. Only nodes that match the filter are returned. All nodes are used + to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the node name is + specified. If the filter has default value and node name is specified, the + matching node is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches nodes with HealthState + value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int """ @@ -14305,17 +13281,15 @@ class NodeHealthStateFilter(msrest.serialization.Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = kwargs.get('node_name_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) -class NodeId(msrest.serialization.Model): - """An internal ID used by Service Fabric to uniquely identify a node. 
Node Id is deterministically generated from node name. +class NodeId(Model): + """An internal ID used by Service Fabric to uniquely identify a node. Node Id + is deterministically generated from node name. :param id: Value of the node Id. This is a 128 bit integer. :type id: str @@ -14325,25 +13299,22 @@ class NodeId(msrest.serialization.Model): 'id': {'key': 'Id', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeId, self).__init__(**kwargs) self.id = kwargs.get('id', None) -class NodeImpact(msrest.serialization.Model): +class NodeImpact(Model): """Describes the expected impact of a repair to a particular node. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the impacted node. :type node_name: str - :param impact_level: The level of impact expected. Possible values include: "Invalid", "None", - "Restart", "RemoveData", "RemoveNode". + :param impact_level: The level of impact expected. Possible values + include: 'Invalid', 'None', 'Restart', 'RemoveData', 'RemoveNode' :type impact_level: str or ~azure.servicefabric.models.ImpactLevel """ @@ -14356,69 +13327,73 @@ class NodeImpact(msrest.serialization.Model): 'impact_level': {'key': 'ImpactLevel', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeImpact, self).__init__(**kwargs) - self.node_name = kwargs['node_name'] + self.node_name = kwargs.get('node_name', None) self.impact_level = kwargs.get('impact_level', None) -class NodeInfo(msrest.serialization.Model): +class NodeInfo(Model): """Information about a node in Service Fabric cluster. :param name: The name of a Service Fabric node. 
:type name: str - :param ip_address_or_fqdn: The IP address or fully qualified domain name of the node. + :param ip_address_or_fqdn: The IP address or fully qualified domain name + of the node. :type ip_address_or_fqdn: str :param type: The type of the node. :type type: str - :param code_version: The version of Service Fabric binaries that the node is running. + :param code_version: The version of Service Fabric binaries that the node + is running. :type code_version: str - :param config_version: The version of Service Fabric cluster manifest that the node is using. + :param config_version: The version of Service Fabric cluster manifest that + the node is using. :type config_version: str - :param node_status: The status of the node. Possible values include: "Invalid", "Up", "Down", - "Enabling", "Disabling", "Disabled", "Unknown", "Removed". + :param node_status: The status of the node. Possible values include: + 'Invalid', 'Up', 'Down', 'Enabling', 'Disabling', 'Disabled', 'Unknown', + 'Removed' :type node_status: str or ~azure.servicefabric.models.NodeStatus - :param node_up_time_in_seconds: Time in seconds since the node has been in NodeStatus Up. Value - zero indicates that the node is not Up. + :param node_up_time_in_seconds: Time in seconds since the node has been in + NodeStatus Up. Value zero indicates that the node is not Up. :type node_up_time_in_seconds: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param is_seed_node: Indicates if the node is a seed node or not. 
Returns true if the node is a - seed node, otherwise false. A quorum of seed nodes are required for proper operation of Service - Fabric cluster. + :param is_seed_node: Indicates if the node is a seed node or not. Returns + true if the node is a seed node, otherwise false. A quorum of seed nodes + are required for proper operation of Service Fabric cluster. :type is_seed_node: bool :param upgrade_domain: The upgrade domain of the node. :type upgrade_domain: str :param fault_domain: The fault domain of the node. :type fault_domain: str - :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is - deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a + node. Node Id is deterministically generated from node name. :type id: ~azure.servicefabric.models.NodeId - :param instance_id: The ID representing the node instance. While the ID of the node is - deterministically generated from the node name and remains same across restarts, the InstanceId - changes every time node restarts. + :param instance_id: The ID representing the node instance. While the ID of + the node is deterministically generated from the node name and remains + same across restarts, the InstanceId changes every time node restarts. :type instance_id: str - :param node_deactivation_info: Information about the node deactivation. This information is - valid for a node that is undergoing deactivation or has already been deactivated. - :type node_deactivation_info: ~azure.servicefabric.models.NodeDeactivationInfo - :param is_stopped: Indicates if the node is stopped by calling stop node API or not. Returns - true if the node is stopped, otherwise false. + :param node_deactivation_info: Information about the node deactivation. + This information is valid for a node that is undergoing deactivation or + has already been deactivated. 
+ :type node_deactivation_info: + ~azure.servicefabric.models.NodeDeactivationInfo + :param is_stopped: Indicates if the node is stopped by calling stop node + API or not. Returns true if the node is stopped, otherwise false. :type is_stopped: bool - :param node_down_time_in_seconds: Time in seconds since the node has been in NodeStatus Down. - Value zero indicates node is not NodeStatus Down. + :param node_down_time_in_seconds: Time in seconds since the node has been + in NodeStatus Down. Value zero indicates node is not NodeStatus Down. :type node_down_time_in_seconds: str - :param node_up_at: Date time in UTC when the node came up. If the node has never been up then - this value will be zero date time. - :type node_up_at: ~datetime.datetime - :param node_down_at: Date time in UTC when the node went down. If node has never been down then - this value will be zero date time. - :type node_down_at: ~datetime.datetime - :param node_tags: List that contains tags, which will be applied to the nodes. + :param node_up_at: Date time in UTC when the node came up. If the node has + never been up then this value will be zero date time. + :type node_up_at: datetime + :param node_down_at: Date time in UTC when the node went down. If node has + never been down then this value will be zero date time. + :type node_down_at: datetime + :param node_tags: List that contains tags, which will be applied to the + nodes. :type node_tags: list[str] """ @@ -14444,10 +13419,7 @@ class NodeInfo(msrest.serialization.Model): 'node_tags': {'key': 'NodeTags', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) @@ -14470,14 +13442,17 @@ def __init__( self.node_tags = kwargs.get('node_tags', None) -class NodeLoadInfo(msrest.serialization.Model): - """Information about load on a Service Fabric node. 
It holds a summary of all metrics and their load on a node. +class NodeLoadInfo(Model): + """Information about load on a Service Fabric node. It holds a summary of all + metrics and their load on a node. - :param node_name: Name of the node for which the load information is provided by this object. + :param node_name: Name of the node for which the load information is + provided by this object. :type node_name: str - :param node_load_metric_information: List that contains metrics and their load information on - this node. - :type node_load_metric_information: list[~azure.servicefabric.models.NodeLoadMetricInformation] + :param node_load_metric_information: List that contains metrics and their + load information on this node. + :type node_load_metric_information: + list[~azure.servicefabric.models.NodeLoadMetricInformation] """ _attribute_map = { @@ -14485,49 +13460,52 @@ class NodeLoadInfo(msrest.serialization.Model): 'node_load_metric_information': {'key': 'NodeLoadMetricInformation', 'type': '[NodeLoadMetricInformation]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeLoadInfo, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.node_load_metric_information = kwargs.get('node_load_metric_information', None) -class NodeLoadMetricInformation(msrest.serialization.Model): - """Represents data structure that contains load information for a certain metric on a node. +class NodeLoadMetricInformation(Model): + """Represents data structure that contains load information for a certain + metric on a node. - :param name: Name of the metric for which this load information is provided. + :param name: Name of the metric for which this load information is + provided. :type name: str :param node_capacity: Total capacity on the node for this metric. :type node_capacity: str - :param node_load: Current load on the node for this metric. 
In future releases of Service - Fabric this parameter will be deprecated in favor of CurrentNodeLoad. + :param node_load: Current load on the node for this metric. In future + releases of Service Fabric this parameter will be deprecated in favor of + CurrentNodeLoad. :type node_load: str - :param node_remaining_capacity: The remaining capacity on the node for this metric. In future - releases of Service Fabric this parameter will be deprecated in favor of NodeCapacityRemaining. + :param node_remaining_capacity: The remaining capacity on the node for + this metric. In future releases of Service Fabric this parameter will be + deprecated in favor of NodeCapacityRemaining. :type node_remaining_capacity: str - :param is_capacity_violation: Indicates if there is a capacity violation for this metric on the - node. + :param is_capacity_violation: Indicates if there is a capacity violation + for this metric on the node. :type is_capacity_violation: bool - :param node_buffered_capacity: The value that indicates the reserved capacity for this metric - on the node. + :param node_buffered_capacity: The value that indicates the reserved + capacity for this metric on the node. :type node_buffered_capacity: str - :param node_remaining_buffered_capacity: The remaining reserved capacity for this metric on the - node. In future releases of Service Fabric this parameter will be deprecated in favor of - BufferedNodeCapacityRemaining. + :param node_remaining_buffered_capacity: The remaining reserved capacity + for this metric on the node. In future releases of Service Fabric this + parameter will be deprecated in favor of BufferedNodeCapacityRemaining. :type node_remaining_buffered_capacity: str :param current_node_load: Current load on the node for this metric. :type current_node_load: str - :param node_capacity_remaining: The remaining capacity on the node for the metric. + :param node_capacity_remaining: The remaining capacity on the node for the + metric. 
:type node_capacity_remaining: str - :param buffered_node_capacity_remaining: The remaining capacity which is not reserved by - NodeBufferPercentage for this metric on the node. + :param buffered_node_capacity_remaining: The remaining capacity which is + not reserved by NodeBufferPercentage for this metric on the node. :type buffered_node_capacity_remaining: str - :param planned_node_load_removal: This value represents the load of the replicas that are - planned to be removed in the future. - This kind of load is reported for replicas that are currently being moving to other nodes and - for replicas that are currently being dropped but still use the load on the source node. + :param planned_node_load_removal: This value represents the load of the + replicas that are planned to be removed in the future. + This kind of load is reported for replicas that are currently being moving + to other nodes and for replicas that are currently being dropped but still + use the load on the source node. :type planned_node_load_removal: str """ @@ -14545,10 +13523,7 @@ class NodeLoadMetricInformation(msrest.serialization.Model): 'planned_node_load_removal': {'key': 'PlannedNodeLoadRemoval', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeLoadMetricInformation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.node_capacity = kwargs.get('node_capacity', None) @@ -14568,38 +13543,18 @@ class NodeNewHealthReportEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -14616,16 +13571,17 @@ class NodeNewHealthReportEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -14639,11 +13595,11 @@ class NodeNewHealthReportEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -14656,21 +13612,18 @@ class NodeNewHealthReportEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'NodeNewHealthReport' # type: str - self.node_instance_id = kwargs['node_instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.node_instance_id = kwargs.get('node_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = 
kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'NodeNewHealthReport' class NodeOpenFailedEvent(NodeEvent): @@ -14678,38 +13631,18 @@ class NodeOpenFailedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - 
"ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -14733,9 +13666,9 @@ class NodeOpenFailedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -14749,11 +13682,11 @@ class NodeOpenFailedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -14766,21 +13699,18 @@ class NodeOpenFailedEvent(NodeEvent): 'error': {'key': 'Error', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeOpenFailedEvent, self).__init__(**kwargs) - self.kind = 'NodeOpenFailed' # type: str - self.node_instance = kwargs['node_instance'] - self.node_id = kwargs['node_id'] - self.upgrade_domain = kwargs['upgrade_domain'] - self.fault_domain = kwargs['fault_domain'] - self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] - self.hostname = kwargs['hostname'] - self.is_seed_node = kwargs['is_seed_node'] - self.node_version = kwargs['node_version'] - self.error = kwargs['error'] + self.node_instance = kwargs.get('node_instance', None) + self.node_id = kwargs.get('node_id', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.hostname = kwargs.get('hostname', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.node_version = kwargs.get('node_version', None) + self.error 
= kwargs.get('error', None) + self.kind = 'NodeOpenFailed' class NodeOpenSucceededEvent(NodeEvent): @@ -14788,38 +13718,18 @@ class NodeOpenSucceededEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", 
"ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -14841,9 +13751,9 @@ class NodeOpenSucceededEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -14856,11 +13766,11 @@ class NodeOpenSucceededEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -14872,20 +13782,17 @@ class NodeOpenSucceededEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs 
- ): + def __init__(self, **kwargs): super(NodeOpenSucceededEvent, self).__init__(**kwargs) - self.kind = 'NodeOpenSucceeded' # type: str - self.node_instance = kwargs['node_instance'] - self.node_id = kwargs['node_id'] - self.upgrade_domain = kwargs['upgrade_domain'] - self.fault_domain = kwargs['fault_domain'] - self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] - self.hostname = kwargs['hostname'] - self.is_seed_node = kwargs['is_seed_node'] - self.node_version = kwargs['node_version'] + self.node_instance = kwargs.get('node_instance', None) + self.node_id = kwargs.get('node_id', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.hostname = kwargs.get('hostname', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.node_version = kwargs.get('node_version', None) + self.kind = 'NodeOpenSucceeded' class NodeRemovedFromClusterEvent(NodeEvent): @@ -14893,38 +13800,18 @@ class NodeRemovedFromClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -14942,9 +13829,9 @@ class NodeRemovedFromClusterEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -14955,11 +13842,11 @@ class NodeRemovedFromClusterEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -14969,33 +13856,29 @@ class NodeRemovedFromClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeRemovedFromClusterEvent, self).__init__(**kwargs) - self.kind = 'NodeRemovedFromCluster' # type: str - self.node_id = kwargs['node_id'] - self.node_instance = kwargs['node_instance'] - self.node_type = kwargs['node_type'] - 
self.fabric_version = kwargs['fabric_version'] - self.ip_address_or_fqdn = kwargs['ip_address_or_fqdn'] - self.node_capacities = kwargs['node_capacities'] + self.node_id = kwargs.get('node_id', None) + self.node_instance = kwargs.get('node_instance', None) + self.node_type = kwargs.get('node_type', None) + self.fabric_version = kwargs.get('fabric_version', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.node_capacities = kwargs.get('node_capacities', None) + self.kind = 'NodeRemovedFromCluster' -class RepairImpactDescriptionBase(msrest.serialization.Model): +class RepairImpactDescriptionBase(Model): """Describes the expected impact of executing a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairImpactDescription. + sub-classes are: NodeRepairImpactDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair impact represented by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairImpactKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -15010,26 +13893,22 @@ class RepairImpactDescriptionBase(msrest.serialization.Model): 'kind': {'Node': 'NodeRepairImpactDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairImpactDescriptionBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class NodeRepairImpactDescription(RepairImpactDescriptionBase): """Describes the expected impact of a repair on a set of nodes. 
- -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair impact represented by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairImpactKind - :param node_impact_list: The list of nodes impacted by a repair action and their respective - expected impact. + :param kind: Required. Constant filled by server. + :type kind: str + :param node_impact_list: The list of nodes impacted by a repair action and + their respective expected impact. :type node_impact_list: list[~azure.servicefabric.models.NodeImpact] """ @@ -15042,28 +13921,24 @@ class NodeRepairImpactDescription(RepairImpactDescriptionBase): 'node_impact_list': {'key': 'NodeImpactList', 'type': '[NodeImpact]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeRepairImpactDescription, self).__init__(**kwargs) - self.kind = 'Node' # type: str self.node_impact_list = kwargs.get('node_impact_list', None) + self.kind = 'Node' -class RepairTargetDescriptionBase(msrest.serialization.Model): +class RepairTargetDescriptionBase(Model): """Describes the entities targeted by a repair action. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairTargetDescription. + sub-classes are: NodeRepairTargetDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair target described by the current object.Constant - filled by server. 
Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairTargetKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -15078,24 +13953,20 @@ class RepairTargetDescriptionBase(msrest.serialization.Model): 'kind': {'Node': 'NodeRepairTargetDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTargetDescriptionBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class NodeRepairTargetDescription(RepairTargetDescriptionBase): """Describes the list of nodes targeted by a repair action. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair target described by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairTargetKind + :param kind: Required. Constant filled by server. + :type kind: str :param node_names: The list of nodes targeted by a repair action. :type node_names: list[str] """ @@ -15109,17 +13980,15 @@ class NodeRepairTargetDescription(RepairTargetDescriptionBase): 'node_names': {'key': 'NodeNames', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeRepairTargetDescription, self).__init__(**kwargs) - self.kind = 'Node' # type: str self.node_names = kwargs.get('node_names', None) + self.kind = 'Node' -class NodeResult(msrest.serialization.Model): - """Contains information about a node that was targeted by a user-induced operation. +class NodeResult(Model): + """Contains information about a node that was targeted by a user-induced + operation. 
:param node_name: The name of a Service Fabric node. :type node_name: str @@ -15132,45 +14001,41 @@ class NodeResult(msrest.serialization.Model): 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeResult, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.node_instance_id = kwargs.get('node_instance_id', None) class NodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for nodes, containing health evaluations for + each unhealthy node that impacted current aggregated health state. Can be + returned when evaluating cluster health and the aggregated health state is + either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the - ClusterHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes from the ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes found in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -15178,34 +14043,31 @@ class NodesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodesHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Nodes' # type: str self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Nodes' -class NodeTagsDescription(msrest.serialization.Model): +class NodeTagsDescription(Model): """Describes the tags required for placement or running of the service. All required parameters must be populated in order to send to Azure. :param count: Required. The number of tags. :type count: int - :param tags: Required. A set of tags. Array of size specified by the ‘Count’ parameter, for the - placement tags of the service. + :param tags: Required. Array of size specified by the ‘Count’ parameter, + for the placement tags of the service. 
:type tags: list[str] """ @@ -15219,25 +14081,26 @@ class NodeTagsDescription(msrest.serialization.Model): 'tags': {'key': 'Tags', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeTagsDescription, self).__init__(**kwargs) - self.count = kwargs['count'] - self.tags = kwargs['tags'] + self.count = kwargs.get('count', None) + self.tags = kwargs.get('tags', None) -class NodeTransitionProgress(msrest.serialization.Model): - """Information about an NodeTransition operation. This class contains an OperationState and a NodeTransitionResult. The NodeTransitionResult is not valid until OperationState -is Completed or Faulted. +class NodeTransitionProgress(Model): + """Information about an NodeTransition operation. This class contains an + OperationState and a NodeTransitionResult. The NodeTransitionResult is not + valid until OperationState + is Completed or Faulted. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param node_transition_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type node_transition_result: ~azure.servicefabric.models.NodeTransitionResult + :param node_transition_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type node_transition_result: + ~azure.servicefabric.models.NodeTransitionResult """ _attribute_map = { @@ -15245,23 +14108,21 @@ class NodeTransitionProgress(msrest.serialization.Model): 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeTransitionProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.node_transition_result = kwargs.get('node_transition_result', None) -class NodeTransitionResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class NodeTransitionResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param node_result: Contains information about a node that was targeted by a user-induced - operation. + :param node_result: Contains information about a node that was targeted by + a user-induced operation. :type node_result: ~azure.servicefabric.models.NodeResult """ @@ -15270,30 +14131,29 @@ class NodeTransitionResult(msrest.serialization.Model): 'node_result': {'key': 'NodeResult', 'type': 'NodeResult'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeTransitionResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.node_result = kwargs.get('node_result', None) -class NodeTypeHealthPolicyMapItem(msrest.serialization.Model): +class NodeTypeHealthPolicyMapItem(Model): """Defines an item in NodeTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. 
The key of the node type health policy map item. This is the name of the - node type. + :param key: Required. The key of the node type health policy map item. + This is the name of the node type. :type key: str :param value: Required. The value of the node type health policy map item. - If the percentage is respected but there is at least one unhealthy node in the node type, the - health is evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy nodes over the total number - of nodes in the node type. - The computation rounds up to tolerate one failure on small numbers of nodes. - The max percent unhealthy nodes allowed for the node type. Must be between zero and 100. + If the percentage is respected but there is at least one unhealthy node in + the node type, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes + over the total number of nodes in the node type. + The computation rounds up to tolerate one failure on small numbers of + nodes. + The max percent unhealthy nodes allowed for the node type. Must be between + zero and 100. :type value: int """ @@ -15307,48 +14167,48 @@ class NodeTypeHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeTypeHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) class NodeTypeNodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for nodes of a particular node type. The node type nodes evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy node of the included node type that impacted current aggregated health state. 
- - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for nodes of a particular node type. The node + type nodes evaluation can be returned when cluster health evaluation + returns unhealthy aggregated health state, either Error or Warning. It + contains health evaluations for each unhealthy node of the included node + type that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param node_type_name: The node type name as defined in the cluster manifest. + :param kind: Required. Constant filled by server. + :type kind: str + :param node_type_name: The node type name as defined in the cluster + manifest. :type node_type_name: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes for the node - type, specified as an entry in NodeTypeHealthPolicyMap. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes for the node type, specified as an entry in + NodeTypeHealthPolicyMap. :type max_percent_unhealthy_nodes: int - :param total_count: Total number of nodes of the node type found in the health store. + :param total_count: Total number of nodes of the node type found in the + health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation of this node type that impacted the - aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation of this node type that impacted the aggregated + health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -15356,25 +14216,22 @@ class NodeTypeNodesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_type_name': {'key': 'NodeTypeName', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeTypeNodesHealthEvaluation, self).__init__(**kwargs) - self.kind = 'NodeTypeNodes' # type: str self.node_type_name = kwargs.get('node_type_name', None) self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'NodeTypeNodes' class NodeUpEvent(NodeEvent): @@ -15382,86 +14239,65 @@ class NodeUpEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_down_at: Required. Time when Node was last down. - :type last_node_down_at: ~datetime.datetime + :type last_node_down_at: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_down_at': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_down_at': {'key': 'LastNodeDownAt', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeUpEvent, self).__init__(**kwargs) - self.kind = 'NodeUp' # type: str - self.node_instance = kwargs['node_instance'] - self.last_node_down_at = kwargs['last_node_down_at'] + self.node_instance = kwargs.get('node_instance', None) + self.last_node_down_at = kwargs.get('last_node_down_at', 
None) + self.kind = 'NodeUp' -class NodeUpgradeProgressInfo(msrest.serialization.Model): +class NodeUpgradeProgressInfo(Model): """Information about the upgrading node and its status. :param node_name: The name of a Service Fabric node. :type node_name: str - :param upgrade_phase: The state of the upgrading node. Possible values include: "Invalid", - "PreUpgradeSafetyCheck", "Upgrading", "PostUpgradeSafetyCheck". + :param upgrade_phase: The state of the upgrading node. Possible values + include: 'Invalid', 'PreUpgradeSafetyCheck', 'Upgrading', + 'PostUpgradeSafetyCheck' :type upgrade_phase: str or ~azure.servicefabric.models.NodeUpgradePhase - :param pending_safety_checks: List of pending safety checks. - :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] + :param pending_safety_checks: List of pending safety checks + :type pending_safety_checks: + list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -15470,27 +14306,27 @@ class NodeUpgradeProgressInfo(msrest.serialization.Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(NodeUpgradeProgressInfo, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.upgrade_phase = kwargs.get('upgrade_phase', None) self.pending_safety_checks = kwargs.get('pending_safety_checks', None) -class OperationStatus(msrest.serialization.Model): - """Contains the OperationId, OperationState, and OperationType for user-induced operations. +class OperationStatus(Model): + """Contains the OperationId, OperationState, and OperationType for + user-induced operations. - :param operation_id: A GUID that identifies a call to this API. This is also passed into the - corresponding GetProgress API. + :param operation_id: A GUID that identifies a call to this API. This is + also passed into the corresponding GetProgress API. 
:type operation_id: str - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param type: The type of the operation. Possible values include: "Invalid", - "PartitionDataLoss", "PartitionQuorumLoss", "PartitionRestart", "NodeTransition". + :param type: The type of the operation. Possible values include: + 'Invalid', 'PartitionDataLoss', 'PartitionQuorumLoss', 'PartitionRestart', + 'NodeTransition' :type type: str or ~azure.servicefabric.models.OperationType """ @@ -15500,26 +14336,25 @@ class OperationStatus(msrest.serialization.Model): 'type': {'key': 'Type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(OperationStatus, self).__init__(**kwargs) self.operation_id = kwargs.get('operation_id', None) self.state = kwargs.get('state', None) self.type = kwargs.get('type', None) -class PackageSharingPolicyInfo(msrest.serialization.Model): +class PackageSharingPolicyInfo(Model): """Represents a policy for the package sharing. - :param shared_package_name: The name of code, configuration or data package that should be - shared. + :param shared_package_name: The name of code, configuration or data + package that should be shared. :type shared_package_name: str - :param package_sharing_scope: Represents the scope for PackageSharingPolicy. This is specified - during DeployServicePackageToNode operation. Possible values include: "None", "All", "Code", - "Config", "Data". - :type package_sharing_scope: str or ~azure.servicefabric.models.PackageSharingPolicyScope + :param package_sharing_scope: Represents the scope for + PackageSharingPolicy. 
This is specified during DeployServicePackageToNode + operation. Possible values include: 'None', 'All', 'Code', 'Config', + 'Data' + :type package_sharing_scope: str or + ~azure.servicefabric.models.PackageSharingPolicyScope """ _attribute_map = { @@ -15527,23 +14362,24 @@ class PackageSharingPolicyInfo(msrest.serialization.Model): 'package_sharing_scope': {'key': 'PackageSharingScope', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PackageSharingPolicyInfo, self).__init__(**kwargs) self.shared_package_name = kwargs.get('shared_package_name', None) self.package_sharing_scope = kwargs.get('package_sharing_scope', None) -class PagedApplicationInfoList(msrest.serialization.Model): - """The list of applications in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedApplicationInfoList(Model): + """The list of applications in the cluster. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. 
If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of application information. :type items: list[~azure.servicefabric.models.ApplicationInfo] @@ -15554,26 +14390,28 @@ class PagedApplicationInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ApplicationInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedApplicationResourceDescriptionList(msrest.serialization.Model): - """The list of application resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedApplicationResourceDescriptionList(Model): + """The list of application resources. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. 
If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: list[~azure.servicefabric.models.ApplicationResourceDescription] + :type items: + list[~azure.servicefabric.models.ApplicationResourceDescription] """ _attribute_map = { @@ -15581,23 +14419,24 @@ class PagedApplicationResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ApplicationResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedApplicationResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedApplicationTypeInfoList(msrest.serialization.Model): - """The list of application types that are provisioned or being provisioned in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedApplicationTypeInfoList(Model): + """The list of application types that are provisioned or being provisioned in + the cluster. The list is paged when all of the results cannot fit in a + single message. The next set of results can be obtained by executing the + same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of application type information. :type items: list[~azure.servicefabric.models.ApplicationTypeInfo] @@ -15608,23 +14447,24 @@ class PagedApplicationTypeInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedApplicationTypeInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupConfigurationInfoList(msrest.serialization.Model): - """The list of backup configuration information. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupConfigurationInfoList(Model): + """The list of backup configuration information. The list is paged when all of + the results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of backup configuration information. :type items: list[~azure.servicefabric.models.BackupConfigurationInfo] @@ -15635,23 +14475,24 @@ class PagedBackupConfigurationInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupConfigurationInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedBackupConfigurationInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupEntityList(msrest.serialization.Model): - """The list of backup entities that are being periodically backed. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupEntityList(Model): + """The list of backup entities that are being periodically backed. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. 
+ :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of backup entity information. :type items: list[~azure.servicefabric.models.BackupEntity] @@ -15662,23 +14503,23 @@ class PagedBackupEntityList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupEntity]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedBackupEntityList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupInfoList(msrest.serialization.Model): - """The list of backups. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupInfoList(Model): + """The list of backups. The list is paged when all of the results cannot fit + in a single message. The next set of results can be obtained by executing + the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of backup information. :type items: list[~azure.servicefabric.models.BackupInfo] @@ -15689,23 +14530,24 @@ class PagedBackupInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedBackupInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedBackupPolicyDescriptionList(msrest.serialization.Model): - """The list of backup policies configured in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupPolicyDescriptionList(Model): + """The list of backup policies configured in the cluster. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: The list of backup policies information. :type items: list[~azure.servicefabric.models.BackupPolicyDescription] @@ -15716,23 +14558,24 @@ class PagedBackupPolicyDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupPolicyDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedBackupPolicyDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedComposeDeploymentStatusInfoList(msrest.serialization.Model): - """The list of compose deployments in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedComposeDeploymentStatusInfoList(Model): + """The list of compose deployments in the cluster. The list is paged when all + of the results cannot fit in a single message. The next set of results can + be obtained by executing the same query with the continuation token + provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. 
+ :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of compose deployment status information. :type items: list[~azure.servicefabric.models.ComposeDeploymentStatusInfo] @@ -15743,25 +14586,25 @@ class PagedComposeDeploymentStatusInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ComposeDeploymentStatusInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedComposeDeploymentStatusInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedDeployedApplicationInfoList(msrest.serialization.Model): - """The list of deployed applications in activating, downloading, or active states on a node. -The list is paged when all of the results cannot fit in a single message. -The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedDeployedApplicationInfoList(Model): + """The list of deployed applications in activating, downloading, or active + states on a node. + The list is paged when all of the results cannot fit in a single message. + The next set of results can be obtained by executing the same query with + the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. 
When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of deployed application information. :type items: list[~azure.servicefabric.models.DeployedApplicationInfo] @@ -15772,23 +14615,23 @@ class PagedDeployedApplicationInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[DeployedApplicationInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedDeployedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedGatewayResourceDescriptionList(msrest.serialization.Model): - """The list of gateway resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedGatewayResourceDescriptionList(Model): + """The list of gateway resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. 
When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.GatewayResourceDescription] @@ -15799,23 +14642,23 @@ class PagedGatewayResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[GatewayResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedGatewayResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedNetworkResourceDescriptionList(msrest.serialization.Model): - """The list of network resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedNetworkResourceDescriptionList(Model): + """The list of network resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. 
When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.NetworkResourceDescription] @@ -15826,23 +14669,23 @@ class PagedNetworkResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[NetworkResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedNetworkResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedNodeInfoList(msrest.serialization.Model): - """The list of nodes in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedNodeInfoList(Model): + """The list of nodes in the cluster. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. 
When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of node information. :type items: list[~azure.servicefabric.models.NodeInfo] @@ -15853,26 +14696,28 @@ class PagedNodeInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[NodeInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedNodeInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedPropertyInfoList(msrest.serialization.Model): - """The paged list of Service Fabric properties under a given name. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedPropertyInfoList(Model): + """The paged list of Service Fabric properties under a given name. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. 
When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any property under the given name has been modified - during the enumeration. If there was a modification, this property value is false. + :param is_consistent: Indicates whether any property under the given name + has been modified during the enumeration. If there was a modification, + this property value is false. :type is_consistent: bool :param properties: List of property information. :type properties: list[~azure.servicefabric.models.PropertyInfo] @@ -15884,24 +14729,25 @@ class PagedPropertyInfoList(msrest.serialization.Model): 'properties': {'key': 'Properties', 'type': '[PropertyInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedPropertyInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.is_consistent = kwargs.get('is_consistent', None) self.properties = kwargs.get('properties', None) -class PagedReplicaInfoList(msrest.serialization.Model): - """The list of replicas in the cluster for a given partition. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedReplicaInfoList(Model): + """The list of replicas in the cluster for a given partition. 
The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of replica information. :type items: list[~azure.servicefabric.models.ReplicaInfo] @@ -15912,23 +14758,23 @@ class PagedReplicaInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ReplicaInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedReplicaInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedSecretResourceDescriptionList(msrest.serialization.Model): - """The list of secret resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedSecretResourceDescriptionList(Model): + """The list of secret resources. The list is paged when all of the results + cannot fit in a single message. 
The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.SecretResourceDescription] @@ -15939,26 +14785,28 @@ class PagedSecretResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[SecretResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedSecretResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedSecretValueResourceDescriptionList(msrest.serialization.Model): - """The list of values of a secret resource, paged if the number of results exceeds the limits of a single message. The next set of results can be obtained by executing the same query with the continuation token provided in the previous page. +class PagedSecretValueResourceDescriptionList(Model): + """The list of values of a secret resource, paged if the number of results + exceeds the limits of a single message. 
The next set of results can be + obtained by executing the same query with the continuation token provided + in the previous page. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: list[~azure.servicefabric.models.SecretValueResourceDescription] + :type items: + list[~azure.servicefabric.models.SecretValueResourceDescription] """ _attribute_map = { @@ -15966,23 +14814,24 @@ class PagedSecretValueResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[SecretValueResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedSecretValueResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServiceInfoList(msrest.serialization.Model): - """The list of services in the cluster for an application. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. 
+class PagedServiceInfoList(Model): + """The list of services in the cluster for an application. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of service information. :type items: list[~azure.servicefabric.models.ServiceInfo] @@ -15993,23 +14842,24 @@ class PagedServiceInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedServiceInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServicePartitionInfoList(msrest.serialization.Model): - """The list of partition in the cluster for a service. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. 
+class PagedServicePartitionInfoList(Model): + """The list of partition in the cluster for a service. The list is paged when + all of the results cannot fit in a single message. The next set of results + can be obtained by executing the same query with the continuation token + provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of service partition information. :type items: list[~azure.servicefabric.models.ServicePartitionInfo] @@ -16020,23 +14870,24 @@ class PagedServicePartitionInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedServicePartitionInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServiceReplicaDescriptionList(msrest.serialization.Model): - """The list of service resource replicas in the cluster. The list is paged when all of the results cannot fit in a single message. 
The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedServiceReplicaDescriptionList(Model): + """The list of service resource replicas in the cluster. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of service resource replica description. :type items: list[~azure.servicefabric.models.ServiceReplicaDescription] @@ -16047,23 +14898,23 @@ class PagedServiceReplicaDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceReplicaDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedServiceReplicaDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedServiceResourceDescriptionList(msrest.serialization.Model): - """The list of service resources. 
The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedServiceResourceDescriptionList(Model): + """The list of service resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.ServiceResourceDescription] @@ -16074,26 +14925,28 @@ class PagedServiceResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedServiceResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedSubNameInfoList(msrest.serialization.Model): - """A paged list of Service Fabric names. 
The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedSubNameInfoList(Model): + """A paged list of Service Fabric names. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any name under the given name has been modified during - the enumeration. If there was a modification, this property value is false. + :param is_consistent: Indicates whether any name under the given name has + been modified during the enumeration. If there was a modification, this + property value is false. :type is_consistent: bool :param sub_names: List of the child names. 
:type sub_names: list[str] @@ -16105,24 +14958,25 @@ class PagedSubNameInfoList(msrest.serialization.Model): 'sub_names': {'key': 'SubNames', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedSubNameInfoList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.is_consistent = kwargs.get('is_consistent', None) self.sub_names = kwargs.get('sub_names', None) -class PagedUpdatePartitionLoadResultList(msrest.serialization.Model): - """The list of results of the call UpdatePartitionLoad. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedUpdatePartitionLoadResultList(Model): + """The list of results of the call UpdatePartitionLoad. The list is paged when + all of the results cannot fit in a single message. The next set of results + can be obtained by executing the same query with the continuation token + provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. 
:type continuation_token: str :param items: List of partition load update information. :type items: list[~azure.servicefabric.models.UpdatePartitionLoadResult] @@ -16133,23 +14987,23 @@ class PagedUpdatePartitionLoadResultList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[UpdatePartitionLoadResult]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedUpdatePartitionLoadResultList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) -class PagedVolumeResourceDescriptionList(msrest.serialization.Model): - """The list of volume resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedVolumeResourceDescriptionList(Model): + """The list of volume resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. 
:type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.VolumeResourceDescription] @@ -16160,10 +15014,7 @@ class PagedVolumeResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[VolumeResourceDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PagedVolumeResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = kwargs.get('continuation_token', None) self.items = kwargs.get('items', None) @@ -16173,65 +15024,46 @@ class PartitionAnalysisEvent(PartitionEvent): """Represents the base for all Partition Analysis Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionPrimaryMoveAnalysisEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", 
"ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: PartitionPrimaryMoveAnalysisEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. 
An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, } @@ -16240,33 +15072,32 @@ class PartitionAnalysisEvent(PartitionEvent): 'kind': {'PartitionPrimaryMoveAnalysis': 'PartitionPrimaryMoveAnalysisEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionAnalysisEvent, self).__init__(**kwargs) - self.kind = 'PartitionAnalysisEvent' # type: str - self.metadata = kwargs['metadata'] + self.metadata = kwargs.get('metadata', None) + self.kind = 'PartitionAnalysisEvent' class PartitionBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information, for a specific partition, specifying what backup policy is being applied and suspend description, if any. + """Backup configuration information, for a specific partition, specifying what + backup policy is being applied and suspend description, if any. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". - :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. 
:type partition_id: str @@ -16277,22 +15108,19 @@ class PartitionBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionBackupConfigurationInfo, self).__init__(**kwargs) - self.kind = 'Partition' # type: str self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) + self.kind = 'Partition' class PartitionBackupEntity(BackupEntity): @@ -16300,11 +15128,10 @@ class PartitionBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. 
:type partition_id: str @@ -16320,25 +15147,24 @@ class PartitionBackupEntity(BackupEntity): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionBackupEntity, self).__init__(**kwargs) - self.entity_kind = 'Partition' # type: str self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) + self.entity_kind = 'Partition' -class PartitionDataLossProgress(msrest.serialization.Model): +class PartitionDataLossProgress(Model): """Information about a partition data loss user-induced operation. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_data_loss_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type invoke_data_loss_result: ~azure.servicefabric.models.InvokeDataLossResult + :param invoke_data_loss_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type invoke_data_loss_result: + ~azure.servicefabric.models.InvokeDataLossResult """ _attribute_map = { @@ -16346,10 +15172,7 @@ class PartitionDataLossProgress(msrest.serialization.Model): 'invoke_data_loss_result': {'key': 'InvokeDataLossResult', 'type': 'InvokeDataLossResult'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionDataLossProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.invoke_data_loss_result = kwargs.get('invoke_data_loss_result', None) @@ -16358,25 +15181,30 @@ def __init__( class PartitionHealth(EntityHealth): """Information about the health of a Service Fabric partition. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param partition_id: ID of the partition whose health information is described by this object. + :param partition_id: ID of the partition whose health information is + described by this object. :type partition_id: str - :param replica_health_states: The list of replica health states associated with the partition. - :type replica_health_states: list[~azure.servicefabric.models.ReplicaHealthState] + :param replica_health_states: The list of replica health states associated + with the partition. + :type replica_health_states: + list[~azure.servicefabric.models.ReplicaHealthState] """ _attribute_map = { @@ -16388,43 +15216,40 @@ class PartitionHealth(EntityHealth): 'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealth, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.replica_health_states = kwargs.get('replica_health_states', None) class PartitionHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a partition, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. 
The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a partition, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. 
:type description: str - :param partition_id: Id of the partition whose health evaluation is described by this object. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition whose health evaluation is + described by this object. :type partition_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the partition. The types of the unhealthy evaluations can be - ReplicasHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the partition. The types of the + unhealthy evaluations can be ReplicasHealthEvaluation or + EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -16432,21 +15257,18 @@ class PartitionHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Partition' # type: str self.partition_id = kwargs.get('partition_id', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Partition' class PartitionHealthReportExpiredEvent(PartitionEvent): @@ -16454,42 +15276,23 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -16503,16 +15306,17 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -16525,11 +15329,11 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -16541,30 +15345,31 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'PartitionHealthReportExpired' # type: str - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + 
self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'PartitionHealthReportExpired' class PartitionHealthState(EntityHealthState): - """Represents the health state of a partition, which contains the partition identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param partition_id: Id of the partition whose health state is described by this object. + """Represents the health state of a partition, which contains the partition + identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: Id of the partition whose health state is described + by this object. :type partition_id: str """ @@ -16573,26 +15378,27 @@ class PartitionHealthState(EntityHealthState): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealthState, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) class PartitionHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a partition, which contains the partition ID, its aggregated health state and any replicas that respect the filters in the cluster health chunk query description. 
+ """Represents the health state chunk of a partition, which contains the + partition ID, its aggregated health state and any replicas that respect the + filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param partition_id: The Id of the partition. :type partition_id: str - :param replica_health_state_chunks: The list of replica health state chunks belonging to the - partition that respect the filters in the cluster health chunk query description. - :type replica_health_state_chunks: ~azure.servicefabric.models.ReplicaHealthStateChunkList + :param replica_health_state_chunks: The list of replica health state + chunks belonging to the partition that respect the filters in the cluster + health chunk query description. + :type replica_health_state_chunks: + ~azure.servicefabric.models.ReplicaHealthStateChunkList """ _attribute_map = { @@ -16601,21 +15407,20 @@ class PartitionHealthStateChunk(EntityHealthStateChunk): 'replica_health_state_chunks': {'key': 'ReplicaHealthStateChunks', 'type': 'ReplicaHealthStateChunkList'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealthStateChunk, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.replica_health_state_chunks = kwargs.get('replica_health_state_chunks', None) -class PartitionHealthStateChunkList(msrest.serialization.Model): - """The list of partition health state chunks that respect the input filters in the chunk query description. 
-Returned by get cluster health state chunks query as part of the parent application hierarchy. +class PartitionHealthStateChunkList(Model): + """The list of partition health state chunks that respect the input filters in + the chunk query description. + Returned by get cluster health state chunks query as part of the parent + application hierarchy. - :param items: The list of partition health state chunks that respect the input filters in the - chunk query. + :param items: The list of partition health state chunks that respect the + input filters in the chunk query. :type items: list[~azure.servicefabric.models.PartitionHealthStateChunk] """ @@ -16623,58 +15428,68 @@ class PartitionHealthStateChunkList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[PartitionHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class PartitionHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a partition should be included as a child of a service in the cluster health chunk. -The partitions are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent service and application must be included in the cluster health chunk. -One filter can match zero, one or multiple partitions, depending on its properties. - - :param partition_id_filter: ID of the partition that matches the filter. The filter is applied - only to the specified partition, if it exists. - If the partition doesn't exist, no partition is returned in the cluster health chunk based on - this filter. - If the partition exists, it is included in the cluster health chunk if it respects the other - filter properties. 
- If not specified, all partitions that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class PartitionHealthStateFilter(Model): + """Defines matching criteria to determine whether a partition should be + included as a child of a service in the cluster health chunk. + The partitions are only returned if the parent entities match a filter + specified in the cluster health chunk query description. The parent service + and application must be included in the cluster health chunk. + One filter can match zero, one or multiple partitions, depending on its + properties. + + :param partition_id_filter: ID of the partition that matches the filter. + The filter is applied only to the specified partition, if it exists. + If the partition doesn't exist, no partition is returned in the cluster + health chunk based on this filter. + If the partition exists, it is included in the cluster health chunk if it + respects the other filter properties. + If not specified, all partitions that match the parent filters (if any) + are taken into consideration and matched against the other filter members, + like health state filter. :type partition_id_filter: str - :param health_state_filter: The filter for the health state of the partitions. It allows - selecting partitions if they match the desired health states. - The possible values are integer value of one of the following health states. Only partitions - that match the filter are returned. All partitions are used to evaluate the cluster aggregated - health state. - If not specified, default value is None, unless the partition ID is specified. If the filter - has default value and partition ID is specified, the matching partition is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. 
- For example, if the provided value is 6, it matches partitions with HealthState value of OK - (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + partitions. It allows selecting partitions if they match the desired + health states. + The possible values are integer value of one of the following health + states. Only partitions that match the filter are returned. All partitions + are used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the partition ID is + specified. If the filter has default value and partition ID is specified, + the matching partition is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches partitions with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. 
+ - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param replica_filters: Defines a list of filters that specify which replicas to be included in - the returned cluster health chunk as children of the parent partition. The replicas are - returned only if the parent partition matches a filter. - If the list is empty, no replicas are returned. All the replicas are used to evaluate the - parent partition aggregated health state, regardless of the input filters. + :param replica_filters: Defines a list of filters that specify which + replicas to be included in the returned cluster health chunk as children + of the parent partition. The replicas are returned only if the parent + partition matches a filter. + If the list is empty, no replicas are returned. All the replicas are used + to evaluate the parent partition aggregated health state, regardless of + the input filters. The partition filter may specify multiple replica filters. - For example, it can specify a filter to return all replicas with health state Error and - another filter to always include a replica identified by its replica id. - :type replica_filters: list[~azure.servicefabric.models.ReplicaHealthStateFilter] + For example, it can specify a filter to return all replicas with health + state Error and another filter to always include a replica identified by + its replica id. 
+ :type replica_filters: + list[~azure.servicefabric.models.ReplicaHealthStateFilter] """ _attribute_map = { @@ -16683,10 +15498,7 @@ class PartitionHealthStateFilter(msrest.serialization.Model): 'replica_filters': {'key': 'ReplicaFilters', 'type': '[ReplicaHealthStateFilter]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionHealthStateFilter, self).__init__(**kwargs) self.partition_id_filter = kwargs.get('partition_id_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) @@ -16694,20 +15506,21 @@ def __init__( class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing instances of stateless service partition. + """Represents a scaling mechanism for adding or removing instances of + stateless service partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. - Possible values include: "Invalid", "PartitionInstanceCount", - "AddRemoveIncrementalNamedPartition". - :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind - :param min_instance_count: Required. Minimum number of instances of the partition. + :param kind: Required. Constant filled by server. + :type kind: str + :param min_instance_count: Required. Minimum number of instances of the + partition. :type min_instance_count: int - :param max_instance_count: Required. Maximum number of instances of the partition. + :param max_instance_count: Required. Maximum number of instances of the + partition. :type max_instance_count: int - :param scale_increment: Required. The number of instances to add or remove during a scaling - operation. + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. 
:type scale_increment: int """ @@ -16725,31 +15538,32 @@ class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionInstanceCountScaleMechanism, self).__init__(**kwargs) - self.kind = 'PartitionInstanceCount' # type: str - self.min_instance_count = kwargs['min_instance_count'] - self.max_instance_count = kwargs['max_instance_count'] - self.scale_increment = kwargs['scale_increment'] + self.min_instance_count = kwargs.get('min_instance_count', None) + self.max_instance_count = kwargs.get('max_instance_count', None) + self.scale_increment = kwargs.get('scale_increment', None) + self.kind = 'PartitionInstanceCount' -class PartitionLoadInformation(msrest.serialization.Model): - """Represents load information for a partition, which contains the primary and secondary reported load metrics. -In case there is no load reported, PartitionLoadInformation will contain the default load for the service of the partition. -For default loads, LoadMetricReport's LastReportedUtc is set to 0. +class PartitionLoadInformation(Model): + """Represents load information for a partition, which contains the primary and + secondary reported load metrics. + In case there is no load reported, PartitionLoadInformation will contain + the default load for the service of the partition. + For default loads, LoadMetricReport's LastReportedUtc is set to 0. :param partition_id: Id of the partition. :type partition_id: str - :param primary_load_metric_reports: Array of load reports from the primary replica for this - partition. - :type primary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] - :param secondary_load_metric_reports: Array of aggregated load reports from all secondary - replicas for this partition. + :param primary_load_metric_reports: Array of load reports from the primary + replica for this partition. 
+ :type primary_load_metric_reports: + list[~azure.servicefabric.models.LoadMetricReport] + :param secondary_load_metric_reports: Array of aggregated load reports + from all secondary replicas for this partition. Array only contains the latest reported load for each metric. - :type secondary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] + :type secondary_load_metric_reports: + list[~azure.servicefabric.models.LoadMetricReport] """ _attribute_map = { @@ -16758,30 +15572,31 @@ class PartitionLoadInformation(msrest.serialization.Model): 'secondary_load_metric_reports': {'key': 'SecondaryLoadMetricReports', 'type': '[LoadMetricReport]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionLoadInformation, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.primary_load_metric_reports = kwargs.get('primary_load_metric_reports', None) self.secondary_load_metric_reports = kwargs.get('secondary_load_metric_reports', None) -class PartitionMetricLoadDescription(msrest.serialization.Model): - """Represents load information for a partition, which contains the metrics load information about primary, all secondary replicas/instances or a specific secondary replica/instance located on a specific node. +class PartitionMetricLoadDescription(Model): + """Represents load information for a partition, which contains the metrics + load information about primary, all secondary replicas/instances or a + specific secondary replica/instance located on a specific node. :param partition_id: Id of the partition. :type partition_id: str - :param primary_replica_load_entries: Partition's load information for primary replica, in case - partition is from a stateful service. - :type primary_replica_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replicas_or_instances_load_entries: Partition's load information for all - secondary replicas or instances. 
+ :param primary_replica_load_entries: Partition's load information for + primary replica, in case partition is from a stateful service. + :type primary_replica_load_entries: + list[~azure.servicefabric.models.MetricLoadDescription] + :param secondary_replicas_or_instances_load_entries: Partition's load + information for all secondary replicas or instances. :type secondary_replicas_or_instances_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replica_or_instance_load_entries_per_node: Partition's load information for a - specific secondary replica or instance located on a specific node. + :param secondary_replica_or_instance_load_entries_per_node: Partition's + load information for a specific secondary replica or instance located on a + specific node. :type secondary_replica_or_instance_load_entries_per_node: list[~azure.servicefabric.models.ReplicaMetricLoadDescription] """ @@ -16793,10 +15608,7 @@ class PartitionMetricLoadDescription(msrest.serialization.Model): 'secondary_replica_or_instance_load_entries_per_node': {'key': 'SecondaryReplicaOrInstanceLoadEntriesPerNode', 'type': '[ReplicaMetricLoadDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionMetricLoadDescription, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.primary_replica_load_entries = kwargs.get('primary_replica_load_entries', None) @@ -16809,42 +15621,23 @@ class PartitionNewHealthReportEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -16858,16 +15651,17 @@ class PartitionNewHealthReportEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -16880,11 +15674,11 @@ class PartitionNewHealthReportEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -16896,20 +15690,17 @@ class PartitionNewHealthReportEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'PartitionNewHealthReport' # type: str - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + 
self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'PartitionNewHealthReport' class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): @@ -16917,47 +15708,28 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", 
"ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata :param when_move_completed: Required. Time when the move was completed. 
- :type when_move_completed: ~datetime.datetime + :type when_move_completed: datetime :param previous_node: Required. The name of a Service Fabric node. :type previous_node: str :param current_node: Required. The name of a Service Fabric node. @@ -16969,9 +15741,9 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, 'when_move_completed': {'required': True}, @@ -16982,11 +15754,11 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, 'when_move_completed': {'key': 'WhenMoveCompleted', 'type': 'iso-8601'}, @@ -16996,28 +15768,27 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): 'relevant_traces': {'key': 'RelevantTraces', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionPrimaryMoveAnalysisEvent, self).__init__(**kwargs) - self.kind = 'PartitionPrimaryMoveAnalysis' # type: str - self.when_move_completed = kwargs['when_move_completed'] - self.previous_node = kwargs['previous_node'] - self.current_node = kwargs['current_node'] - self.move_reason = kwargs['move_reason'] - self.relevant_traces = kwargs['relevant_traces'] + self.when_move_completed = kwargs.get('when_move_completed', None) + self.previous_node = kwargs.get('previous_node', None) + self.current_node = kwargs.get('current_node', 
None) + self.move_reason = kwargs.get('move_reason', None) + self.relevant_traces = kwargs.get('relevant_traces', None) + self.kind = 'PartitionPrimaryMoveAnalysis' -class PartitionQuorumLossProgress(msrest.serialization.Model): +class PartitionQuorumLossProgress(Model): """Information about a partition quorum loss user-induced operation. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_quorum_loss_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type invoke_quorum_loss_result: ~azure.servicefabric.models.InvokeQuorumLossResult + :param invoke_quorum_loss_result: Represents information about an + operation in a terminal state (Completed or Faulted). + :type invoke_quorum_loss_result: + ~azure.servicefabric.models.InvokeQuorumLossResult """ _attribute_map = { @@ -17025,10 +15796,7 @@ class PartitionQuorumLossProgress(msrest.serialization.Model): 'invoke_quorum_loss_result': {'key': 'InvokeQuorumLossResult', 'type': 'InvokeQuorumLossResult'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionQuorumLossProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.invoke_quorum_loss_result = kwargs.get('invoke_quorum_loss_result', None) @@ -17039,42 +15807,23 @@ class PartitionReconfiguredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str @@ -17105,9 +15854,9 @@ class PartitionReconfiguredEvent(PartitionEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, @@ -17125,11 +15874,11 @@ class PartitionReconfiguredEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, @@ -17146,36 +15895,35 @@ class PartitionReconfiguredEvent(PartitionEvent): 'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionReconfiguredEvent, self).__init__(**kwargs) - self.kind = 'PartitionReconfigured' # type: str - self.node_name = kwargs['node_name'] - self.node_instance_id = kwargs['node_instance_id'] - self.service_type = kwargs['service_type'] - self.cc_epoch_data_loss_version = kwargs['cc_epoch_data_loss_version'] - self.cc_epoch_config_version = kwargs['cc_epoch_config_version'] - self.reconfig_type = kwargs['reconfig_type'] - self.result = kwargs['result'] - self.phase0_duration_ms = kwargs['phase0_duration_ms'] - self.phase1_duration_ms = kwargs['phase1_duration_ms'] - self.phase2_duration_ms = kwargs['phase2_duration_ms'] - self.phase3_duration_ms = kwargs['phase3_duration_ms'] - self.phase4_duration_ms = kwargs['phase4_duration_ms'] - self.total_duration_ms = kwargs['total_duration_ms'] - - -class 
PartitionRestartProgress(msrest.serialization.Model): + self.node_name = kwargs.get('node_name', None) + self.node_instance_id = kwargs.get('node_instance_id', None) + self.service_type = kwargs.get('service_type', None) + self.cc_epoch_data_loss_version = kwargs.get('cc_epoch_data_loss_version', None) + self.cc_epoch_config_version = kwargs.get('cc_epoch_config_version', None) + self.reconfig_type = kwargs.get('reconfig_type', None) + self.result = kwargs.get('result', None) + self.phase0_duration_ms = kwargs.get('phase0_duration_ms', None) + self.phase1_duration_ms = kwargs.get('phase1_duration_ms', None) + self.phase2_duration_ms = kwargs.get('phase2_duration_ms', None) + self.phase3_duration_ms = kwargs.get('phase3_duration_ms', None) + self.phase4_duration_ms = kwargs.get('phase4_duration_ms', None) + self.total_duration_ms = kwargs.get('total_duration_ms', None) + self.kind = 'PartitionReconfigured' + + +class PartitionRestartProgress(Model): """Information about a partition restart user-induced operation. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param restart_partition_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type restart_partition_result: ~azure.servicefabric.models.RestartPartitionResult + :param restart_partition_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type restart_partition_result: + ~azure.servicefabric.models.RestartPartitionResult """ _attribute_map = { @@ -17183,46 +15931,43 @@ class PartitionRestartProgress(msrest.serialization.Model): 'restart_partition_result': {'key': 'RestartPartitionResult', 'type': 'RestartPartitionResult'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionRestartProgress, self).__init__(**kwargs) self.state = kwargs.get('state', None) self.restart_partition_result = kwargs.get('restart_partition_result', None) class PartitionsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the partitions of a service, containing health evaluations for each unhealthy partition that impacts current aggregated health state. Can be returned when evaluating service health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for the partitions of a service, containing + health evaluations for each unhealthy partition that impacts current + aggregated health state. Can be returned when evaluating service health and + the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_partitions_per_service: Maximum allowed percentage of unhealthy - partitions per service from the ServiceTypeHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_partitions_per_service: Maximum allowed + percentage of unhealthy partitions per service from the + ServiceTypeHealthPolicy. :type max_percent_unhealthy_partitions_per_service: int - :param total_count: Total number of partitions of the service from the health store. + :param total_count: Total number of partitions of the service from the + health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy PartitionHealthEvaluation that impacted the aggregated - health. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + PartitionHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -17230,37 +15975,35 @@ class PartitionsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PartitionsHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Partitions' # type: str self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Partitions' -class ReplicatorStatus(msrest.serialization.Model): +class ReplicatorStatus(Model): """Represents a base class for primary or secondary replicator status. -Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus. + sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -17275,27 +16018,27 @@ class ReplicatorStatus(msrest.serialization.Model): 'kind': {'Primary': 'PrimaryReplicatorStatus', 'SecondaryReplicatorStatus': 'SecondaryReplicatorStatus'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicatorStatus, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class PrimaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is functioning in a Primary role. + """Provides statistics about the Service Fabric Replicator, when it is + functioning in a Primary role. All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the primary replicator. - :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param remote_replicators: The status of all the active and idle secondary replicators that the - primary is aware of. - :type remote_replicators: list[~azure.servicefabric.models.RemoteReplicatorStatus] + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param replication_queue_status: Details about the replication queue on + the primary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param remote_replicators: The status of all the active and idle secondary + replicators that the primary is aware of. + :type remote_replicators: + list[~azure.servicefabric.models.RemoteReplicatorStatus] """ _validation = { @@ -17308,31 +16051,30 @@ class PrimaryReplicatorStatus(ReplicatorStatus): 'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PrimaryReplicatorStatus, self).__init__(**kwargs) - self.kind = 'Primary' # type: str self.replication_queue_status = kwargs.get('replication_queue_status', None) self.remote_replicators = kwargs.get('remote_replicators', None) + self.kind = 'Primary' -class Probe(msrest.serialization.Model): +class Probe(Model): """Probes have a number of fields that you can use to control their behavior. - :param initial_delay_seconds: The initial delay in seconds to start executing probe once - codepackage has started. + :param initial_delay_seconds: The initial delay in seconds to start + executing probe once codepackage has started. Default value: 0 . :type initial_delay_seconds: int - :param period_seconds: Periodic seconds to execute probe. + :param period_seconds: Periodic seconds to execute probe. Default value: + 10 . :type period_seconds: int - :param timeout_seconds: Period after which probe is considered as failed if it hasn't completed - successfully. + :param timeout_seconds: Period after which probe is considered as failed + if it hasn't completed successfully. Default value: 1 . :type timeout_seconds: int - :param success_threshold: The count of successful probe executions after which probe is - considered success. 
+ :param success_threshold: The count of successful probe executions after + which probe is considered success. Default value: 1 . :type success_threshold: int - :param failure_threshold: The count of failures after which probe is considered failed. + :param failure_threshold: The count of failures after which probe is + considered failed. Default value: 3 . :type failure_threshold: int :param exec_property: Exec command to run inside the container. :type exec_property: ~azure.servicefabric.models.ProbeExec @@ -17353,10 +16095,7 @@ class Probe(msrest.serialization.Model): 'tcp_socket': {'key': 'tcpSocket', 'type': 'ProbeTcpSocket'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(Probe, self).__init__(**kwargs) self.initial_delay_seconds = kwargs.get('initial_delay_seconds', 0) self.period_seconds = kwargs.get('period_seconds', 10) @@ -17368,13 +16107,13 @@ def __init__( self.tcp_socket = kwargs.get('tcp_socket', None) -class ProbeExec(msrest.serialization.Model): +class ProbeExec(Model): """Exec command to run inside the container. All required parameters must be populated in order to send to Azure. - :param command: Required. Comma separated command to run inside the container for example "sh, - -c, echo hello world". + :param command: Required. Comma separated command to run inside the + container for example "sh, -c, echo hello world". :type command: str """ @@ -17386,15 +16125,12 @@ class ProbeExec(msrest.serialization.Model): 'command': {'key': 'command', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProbeExec, self).__init__(**kwargs) - self.command = kwargs['command'] + self.command = kwargs.get('command', None) -class ProbeHttpGet(msrest.serialization.Model): +class ProbeHttpGet(Model): """Http probe for the container. All required parameters must be populated in order to send to Azure. 
@@ -17407,8 +16143,8 @@ class ProbeHttpGet(msrest.serialization.Model): :type host: str :param http_headers: Headers to set in the request. :type http_headers: list[~azure.servicefabric.models.ProbeHttpGetHeaders] - :param scheme: Scheme for the http probe. Can be Http or Https. Possible values include: - "http", "https". + :param scheme: Scheme for the http probe. Can be Http or Https. Possible + values include: 'http', 'https' :type scheme: str or ~azure.servicefabric.models.Scheme """ @@ -17424,19 +16160,16 @@ class ProbeHttpGet(msrest.serialization.Model): 'scheme': {'key': 'scheme', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProbeHttpGet, self).__init__(**kwargs) - self.port = kwargs['port'] + self.port = kwargs.get('port', None) self.path = kwargs.get('path', None) self.host = kwargs.get('host', None) self.http_headers = kwargs.get('http_headers', None) self.scheme = kwargs.get('scheme', None) -class ProbeHttpGetHeaders(msrest.serialization.Model): +class ProbeHttpGetHeaders(Model): """Http headers. All required parameters must be populated in order to send to Azure. @@ -17457,16 +16190,13 @@ class ProbeHttpGetHeaders(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProbeHttpGetHeaders, self).__init__(**kwargs) - self.name = kwargs['name'] - self.value = kwargs['value'] + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) -class ProbeTcpSocket(msrest.serialization.Model): +class ProbeTcpSocket(Model): """Tcp port to probe inside the container. All required parameters must be populated in order to send to Azure. 
@@ -17483,16 +16213,14 @@ class ProbeTcpSocket(msrest.serialization.Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProbeTcpSocket, self).__init__(**kwargs) - self.port = kwargs['port'] + self.port = kwargs.get('port', None) -class PropertyBatchDescriptionList(msrest.serialization.Model): - """Describes a list of property batch operations to be executed. Either all or none of the operations will be committed. +class PropertyBatchDescriptionList(Model): + """Describes a list of property batch operations to be executed. Either all or + none of the operations will be committed. :param operations: A list of the property batch operations to be executed. :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] @@ -17502,23 +16230,20 @@ class PropertyBatchDescriptionList(msrest.serialization.Model): 'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyBatchDescriptionList, self).__init__(**kwargs) self.operations = kwargs.get('operations', None) -class PropertyDescription(msrest.serialization.Model): +class PropertyDescription(Model): """Description of a Service Fabric property. All required parameters must be populated in order to send to Azure. :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param custom_type_id: The property's custom type ID. Using this property, the user is able to - tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. Using this property, + the user is able to tag the type of the value of the property. :type custom_type_id: str :param value: Required. Describes a Service Fabric property value. 
:type value: ~azure.servicefabric.models.PropertyValue @@ -17535,17 +16260,14 @@ class PropertyDescription(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyDescription, self).__init__(**kwargs) - self.property_name = kwargs['property_name'] + self.property_name = kwargs.get('property_name', None) self.custom_type_id = kwargs.get('custom_type_id', None) - self.value = kwargs['value'] + self.value = kwargs.get('value', None) -class PropertyInfo(msrest.serialization.Model): +class PropertyInfo(Model): """Information about a Service Fabric property. All required parameters must be populated in order to send to Azure. @@ -17554,8 +16276,8 @@ class PropertyInfo(msrest.serialization.Model): :type name: str :param value: Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param metadata: Required. The metadata associated with a property, including the property's - name. + :param metadata: Required. The metadata associated with a property, + including the property's name. :type metadata: ~azure.servicefabric.models.PropertyMetadata """ @@ -17570,35 +16292,33 @@ class PropertyInfo(msrest.serialization.Model): 'metadata': {'key': 'Metadata', 'type': 'PropertyMetadata'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyInfo, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) - self.metadata = kwargs['metadata'] + self.metadata = kwargs.get('metadata', None) -class PropertyMetadata(msrest.serialization.Model): +class PropertyMetadata(Model): """The metadata associated with a property, including the property's name. - :param type_id: The kind of property, determined by the type of data. Following are the - possible values. 
Possible values include: "Invalid", "Binary", "Int64", "Double", "String", - "Guid". + :param type_id: The kind of property, determined by the type of data. + Following are the possible values. Possible values include: 'Invalid', + 'Binary', 'Int64', 'Double', 'String', 'Guid' :type type_id: str or ~azure.servicefabric.models.PropertyValueKind :param custom_type_id: The property's custom type ID. :type custom_type_id: str - :param parent: The name of the parent Service Fabric Name for the property. It could be thought - of as the name-space/table under which the property exists. + :param parent: The name of the parent Service Fabric Name for the + property. It could be thought of as the name-space/table under which the + property exists. :type parent: str :param size_in_bytes: The length of the serialized property value. :type size_in_bytes: int - :param last_modified_utc_timestamp: Represents when the Property was last modified. Only write - operations will cause this field to be updated. - :type last_modified_utc_timestamp: ~datetime.datetime - :param sequence_number: The version of the property. Every time a property is modified, its - sequence number is increased. + :param last_modified_utc_timestamp: Represents when the Property was last + modified. Only write operations will cause this field to be updated. + :type last_modified_utc_timestamp: datetime + :param sequence_number: The version of the property. Every time a property + is modified, its sequence number is increased. 
:type sequence_number: str """ @@ -17611,10 +16331,7 @@ class PropertyMetadata(msrest.serialization.Model): 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PropertyMetadata, self).__init__(**kwargs) self.type_id = kwargs.get('type_id', None) self.custom_type_id = kwargs.get('custom_type_id', None) @@ -17625,54 +16342,52 @@ def __init__( class ProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to register or provision an application type using an application package uploaded to the Service Fabric image store. + """Describes the operation to register or provision an application type using + an application package uploaded to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of application type registration or provision requested. The - application package can be registered or provisioned either from the image store or from an - external store. Following are the kinds of the application type provision.Constant filled by - server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". - :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind - :param async_property: Required. Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the request is accepted - by the system, and the provision operation continues without any timeout limit. The default - value is false. For large application packages, we recommend setting the value to true. + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. 
+ For large application packages, we recommend setting the value to true. :type async_property: bool - :param application_type_build_path: Required. The relative path for the application package in - the image store specified during the prior upload operation. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_type_build_path: Required. The relative path for the + application package in the image store specified during the prior upload + operation. :type application_type_build_path: str - :param application_package_cleanup_policy: The kind of action that needs to be taken for - cleaning up the application package after successful provision. Possible values include: - "Invalid", "Default", "Automatic", "Manual". + :param application_package_cleanup_policy: The kind of action that needs + to be taken for cleaning up the application package after successful + provision. Possible values include: 'Invalid', 'Default', 'Automatic', + 'Manual' :type application_package_cleanup_policy: str or ~azure.servicefabric.models.ApplicationPackageCleanupPolicy """ _validation = { - 'kind': {'required': True}, 'async_property': {'required': True}, + 'kind': {'required': True}, 'application_type_build_path': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, 'application_package_cleanup_policy': {'key': 'ApplicationPackageCleanupPolicy', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProvisionApplicationTypeDescription, self).__init__(**kwargs) - self.kind = 'ImageStorePath' # type: str - self.application_type_build_path = kwargs['application_type_build_path'] + self.application_type_build_path = kwargs.get('application_type_build_path', None) self.application_package_cleanup_policy = 
kwargs.get('application_package_cleanup_policy', None) + self.kind = 'ImageStorePath' -class ProvisionFabricDescription(msrest.serialization.Model): +class ProvisionFabricDescription(Model): """Describes the parameters for provisioning a cluster. :param code_file_path: The cluster code package file path. @@ -17686,10 +16401,7 @@ class ProvisionFabricDescription(msrest.serialization.Model): 'cluster_manifest_file_path': {'key': 'ClusterManifestFilePath', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ProvisionFabricDescription, self).__init__(**kwargs) self.code_file_path = kwargs.get('code_file_path', None) self.cluster_manifest_file_path = kwargs.get('cluster_manifest_file_path', None) @@ -17697,64 +16409,66 @@ def __init__( class PutPropertyBatchOperation(PropertyBatchOperation): """Puts the specified property under the specified name. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param custom_type_id: The property's custom type ID. 
Using this property, the user is able to - tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. Using this property, + the user is able to tag the type of the value of the property. :type custom_type_id: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(PutPropertyBatchOperation, self).__init__(**kwargs) - self.kind = 'Put' # type: str - self.value = kwargs['value'] + self.value = kwargs.get('value', None) self.custom_type_id = kwargs.get('custom_type_id', None) - - -class ReconfigurationInformation(msrest.serialization.Model): - """Information about current reconfiguration like phase, type, previous configuration role of replica and reconfiguration start date time. - - :param previous_configuration_role: Replica role before reconfiguration started. Possible - values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type previous_configuration_role: str or ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_phase: Current phase of ongoing reconfiguration. If no reconfiguration - is taking place then this value will be "None". Possible values include: "Unknown", "None", - "Phase0", "Phase1", "Phase2", "Phase3", "Phase4", "AbortPhaseZero". - :type reconfiguration_phase: str or ~azure.servicefabric.models.ReconfigurationPhase - :param reconfiguration_type: Type of current ongoing reconfiguration. If no reconfiguration is - taking place then this value will be "None". Possible values include: "Unknown", "SwapPrimary", - "Failover", "Other". 
- :type reconfiguration_type: str or ~azure.servicefabric.models.ReconfigurationType - :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing reconfiguration. If - no reconfiguration is taking place then this value will be zero date-time. - :type reconfiguration_start_time_utc: ~datetime.datetime + self.kind = 'Put' + + +class ReconfigurationInformation(Model): + """Information about current reconfiguration like phase, type, previous + configuration role of replica and reconfiguration start date time. + + :param previous_configuration_role: Replica role before reconfiguration + started. Possible values include: 'Unknown', 'None', 'Primary', + 'IdleSecondary', 'ActiveSecondary' + :type previous_configuration_role: str or + ~azure.servicefabric.models.ReplicaRole + :param reconfiguration_phase: Current phase of ongoing reconfiguration. If + no reconfiguration is taking place then this value will be "None". + Possible values include: 'Unknown', 'None', 'Phase0', 'Phase1', 'Phase2', + 'Phase3', 'Phase4', 'AbortPhaseZero' + :type reconfiguration_phase: str or + ~azure.servicefabric.models.ReconfigurationPhase + :param reconfiguration_type: Type of current ongoing reconfiguration. If + no reconfiguration is taking place then this value will be "None". + Possible values include: 'Unknown', 'SwapPrimary', 'Failover', 'Other' + :type reconfiguration_type: str or + ~azure.servicefabric.models.ReconfigurationType + :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing + reconfiguration. If no reconfiguration is taking place then this value + will be zero date-time. 
+ :type reconfiguration_start_time_utc: datetime """ _attribute_map = { @@ -17764,10 +16478,7 @@ class ReconfigurationInformation(msrest.serialization.Model): 'reconfiguration_start_time_utc': {'key': 'ReconfigurationStartTimeUtc', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReconfigurationInformation, self).__init__(**kwargs) self.previous_configuration_role = kwargs.get('previous_configuration_role', None) self.reconfiguration_phase = kwargs.get('reconfiguration_phase', None) @@ -17775,14 +16486,16 @@ def __init__( self.reconfiguration_start_time_utc = kwargs.get('reconfiguration_start_time_utc', None) -class RegistryCredential(msrest.serialization.Model): +class RegistryCredential(Model): """Credential information to connect to container registry. :param registry_user_name: The user name to connect to container registry. :type registry_user_name: str - :param registry_password: The password for supplied username to connect to container registry. + :param registry_password: The password for supplied username to connect to + container registry. :type registry_password: str - :param password_encrypted: Indicates that supplied container registry password is encrypted. + :param password_encrypted: Indicates that supplied container registry + password is encrypted. 
:type password_encrypted: bool """ @@ -17792,27 +16505,25 @@ class RegistryCredential(msrest.serialization.Model): 'password_encrypted': {'key': 'PasswordEncrypted', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RegistryCredential, self).__init__(**kwargs) self.registry_user_name = kwargs.get('registry_user_name', None) self.registry_password = kwargs.get('registry_password', None) self.password_encrypted = kwargs.get('password_encrypted', None) -class ReliableCollectionsRef(msrest.serialization.Model): +class ReliableCollectionsRef(Model): """Specifying this parameter adds support for reliable collections. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of ReliableCollection resource. Right now it's not used and you can - use any string. + :param name: Required. Name of ReliableCollection resource. Right now it's + not used and you can use any string. :type name: str - :param do_not_persist_state: False (the default) if ReliableCollections state is persisted to - disk as usual. True if you do not want to persist state, in which case replication is still - enabled and you can use ReliableCollections as distributed cache. + :param do_not_persist_state: False (the default) if ReliableCollections + state is persisted to disk as usual. True if you do not want to persist + state, in which case replication is still enabled and you can use + ReliableCollections as distributed cache. 
:type do_not_persist_state: bool """ @@ -17825,29 +16536,28 @@ class ReliableCollectionsRef(msrest.serialization.Model): 'do_not_persist_state': {'key': 'doNotPersistState', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReliableCollectionsRef, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.do_not_persist_state = kwargs.get('do_not_persist_state', None) -class RemoteReplicatorAcknowledgementDetail(msrest.serialization.Model): - """Provides various statistics of the acknowledgements that are being received from the remote replicator. +class RemoteReplicatorAcknowledgementDetail(Model): + """Provides various statistics of the acknowledgements that are being received + from the remote replicator. - :param average_receive_duration: Represents the average duration it takes for the remote - replicator to receive an operation. + :param average_receive_duration: Represents the average duration it takes + for the remote replicator to receive an operation. :type average_receive_duration: str - :param average_apply_duration: Represents the average duration it takes for the remote - replicator to apply an operation. This usually entails writing the operation to disk. + :param average_apply_duration: Represents the average duration it takes + for the remote replicator to apply an operation. This usually entails + writing the operation to disk. :type average_apply_duration: str - :param not_received_count: Represents the number of operations not yet received by a remote - replicator. + :param not_received_count: Represents the number of operations not yet + received by a remote replicator. :type not_received_count: str - :param received_and_not_applied_count: Represents the number of operations received and not yet - applied by a remote replicator. + :param received_and_not_applied_count: Represents the number of operations + received and not yet applied by a remote replicator. 
:type received_and_not_applied_count: str """ @@ -17858,10 +16568,7 @@ class RemoteReplicatorAcknowledgementDetail(msrest.serialization.Model): 'received_and_not_applied_count': {'key': 'ReceivedAndNotAppliedCount', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RemoteReplicatorAcknowledgementDetail, self).__init__(**kwargs) self.average_receive_duration = kwargs.get('average_receive_duration', None) self.average_apply_duration = kwargs.get('average_apply_duration', None) @@ -17869,15 +16576,17 @@ def __init__( self.received_and_not_applied_count = kwargs.get('received_and_not_applied_count', None) -class RemoteReplicatorAcknowledgementStatus(msrest.serialization.Model): - """Provides details about the remote replicators from the primary replicator's point of view. +class RemoteReplicatorAcknowledgementStatus(Model): + """Provides details about the remote replicators from the primary replicator's + point of view. - :param replication_stream_acknowledgement_detail: Details about the acknowledgements for - operations that are part of the replication stream data. + :param replication_stream_acknowledgement_detail: Details about the + acknowledgements for operations that are part of the replication stream + data. :type replication_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail - :param copy_stream_acknowledgement_detail: Details about the acknowledgements for operations - that are part of the copy stream data. + :param copy_stream_acknowledgement_detail: Details about the + acknowledgements for operations that are part of the copy stream data. 
:type copy_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail """ @@ -17887,45 +16596,46 @@ class RemoteReplicatorAcknowledgementStatus(msrest.serialization.Model): 'copy_stream_acknowledgement_detail': {'key': 'CopyStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RemoteReplicatorAcknowledgementStatus, self).__init__(**kwargs) self.replication_stream_acknowledgement_detail = kwargs.get('replication_stream_acknowledgement_detail', None) self.copy_stream_acknowledgement_detail = kwargs.get('copy_stream_acknowledgement_detail', None) -class RemoteReplicatorStatus(msrest.serialization.Model): - """Represents the state of the secondary replicator from the primary replicator’s point of view. +class RemoteReplicatorStatus(Model): + """Represents the state of the secondary replicator from the primary + replicator’s point of view. - :param replica_id: Represents the replica ID of the remote secondary replicator. + :param replica_id: Represents the replica ID of the remote secondary + replicator. :type replica_id: str - :param last_acknowledgement_processed_time_utc: The last timestamp (in UTC) when an - acknowledgement from the secondary replicator was processed on the primary. - UTC 0 represents an invalid value, indicating that no acknowledgement messages were ever - processed. - :type last_acknowledgement_processed_time_utc: ~datetime.datetime - :param last_received_replication_sequence_number: The highest replication operation sequence - number that the secondary has received from the primary. + :param last_acknowledgement_processed_time_utc: The last timestamp (in + UTC) when an acknowledgement from the secondary replicator was processed + on the primary. + UTC 0 represents an invalid value, indicating that no acknowledgement + messages were ever processed. 
+ :type last_acknowledgement_processed_time_utc: datetime + :param last_received_replication_sequence_number: The highest replication + operation sequence number that the secondary has received from the + primary. :type last_received_replication_sequence_number: str - :param last_applied_replication_sequence_number: The highest replication operation sequence - number that the secondary has applied to its state. + :param last_applied_replication_sequence_number: The highest replication + operation sequence number that the secondary has applied to its state. :type last_applied_replication_sequence_number: str - :param is_in_build: A value that indicates whether the secondary replica is in the process of - being built. + :param is_in_build: A value that indicates whether the secondary replica + is in the process of being built. :type is_in_build: bool - :param last_received_copy_sequence_number: The highest copy operation sequence number that the - secondary has received from the primary. + :param last_received_copy_sequence_number: The highest copy operation + sequence number that the secondary has received from the primary. A value of -1 implies that the secondary has received all copy operations. :type last_received_copy_sequence_number: str - :param last_applied_copy_sequence_number: The highest copy operation sequence number that the - secondary has applied to its state. - A value of -1 implies that the secondary has applied all copy operations and the copy process - is complete. + :param last_applied_copy_sequence_number: The highest copy operation + sequence number that the secondary has applied to its state. + A value of -1 implies that the secondary has applied all copy operations + and the copy process is complete. :type last_applied_copy_sequence_number: str - :param remote_replicator_acknowledgement_status: Represents the acknowledgment status for the - remote secondary replicator. 
+ :param remote_replicator_acknowledgement_status: Represents the + acknowledgment status for the remote secondary replicator. :type remote_replicator_acknowledgement_status: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementStatus """ @@ -17941,10 +16651,7 @@ class RemoteReplicatorStatus(msrest.serialization.Model): 'remote_replicator_acknowledgement_status': {'key': 'RemoteReplicatorAcknowledgementStatus', 'type': 'RemoteReplicatorAcknowledgementStatus'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RemoteReplicatorStatus, self).__init__(**kwargs) self.replica_id = kwargs.get('replica_id', None) self.last_acknowledgement_processed_time_utc = kwargs.get('last_acknowledgement_processed_time_utc', None) @@ -17956,87 +16663,95 @@ def __init__( self.remote_replicator_acknowledgement_status = kwargs.get('remote_replicator_acknowledgement_status', None) -class RepairTask(msrest.serialization.Model): - """Represents a repair task, which includes information about what kind of repair was requested, what its progress is, and what its final result was. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. +class RepairTask(Model): + """Represents a repair task, which includes information about what kind of + repair was requested, what its progress is, and what its final result was. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str :param version: The version of the repair task. - When creating a new repair task, the version must be set to zero. When updating a repair - task, + When creating a new repair task, the version must be set to zero. When + updating a repair task, the version is used for optimistic concurrency checks. 
If the version is - set to zero, the update will not check for write conflicts. If the version is set to a - non-zero value, then the - update will only succeed if the actual current version of the repair task matches this value. + set to zero, the update will not check for write conflicts. If the + version is set to a non-zero value, then the + update will only succeed if the actual current version of the repair task + matches this value. :type version: str - :param description: A description of the purpose of the repair task, or other informational - details. + :param description: A description of the purpose of the repair task, or + other informational details. May be set when the repair task is created, and is immutable once set. :type description: str - :param state: Required. The workflow state of the repair task. Valid initial states are - Created, Claimed, and Preparing. Possible values include: "Invalid", "Created", "Claimed", - "Preparing", "Approved", "Executing", "Restoring", "Completed". + :param state: Required. The workflow state of the repair task. Valid + initial states are Created, Claimed, and Preparing. Possible values + include: 'Invalid', 'Created', 'Claimed', 'Preparing', 'Approved', + 'Executing', 'Restoring', 'Completed' :type state: str or ~azure.servicefabric.models.State - :param flags: A bitwise-OR of the following values, which gives additional details about the - status of the repair task. - - - * 1 - Cancellation of the repair has been requested - * 2 - Abort of the repair has been requested - * 4 - Approval of the repair was forced via client request. + :param flags: A bitwise-OR of the following values, which gives additional + details about the status of the repair task. + - 1 - Cancellation of the repair has been requested + - 2 - Abort of the repair has been requested + - 4 - Approval of the repair was forced via client request :type flags: int - :param action: Required. The requested repair action. 
Must be specified when the repair task is - created, and is immutable once set. + :param action: Required. The requested repair action. Must be specified + when the repair task is created, and is immutable once set. :type action: str - :param target: The target object determines what actions the system will take to prepare for - the impact of the repair, prior to approving execution of the repair. + :param target: The target object determines what actions the system will + take to prepare for the impact of the repair, prior to approving execution + of the repair. May be set when the repair task is created, and is immutable once set. :type target: ~azure.servicefabric.models.RepairTargetDescriptionBase - :param executor: The name of the repair executor. Must be specified in Claimed and later - states, and is immutable once set. + :param executor: The name of the repair executor. Must be specified in + Claimed and later states, and is immutable once set. :type executor: str - :param executor_data: A data string that the repair executor can use to store its internal - state. + :param executor_data: A data string that the repair executor can use to + store its internal state. :type executor_data: str - :param impact: The impact object determines what actions the system will take to prepare for - the impact of the repair, prior to approving execution of the repair. - Impact must be specified by the repair executor when transitioning to the Preparing state, and - is immutable once set. + :param impact: The impact object determines what actions the system will + take to prepare for the impact of the repair, prior to approving execution + of the repair. + Impact must be specified by the repair executor when transitioning to the + Preparing state, and is immutable once set. :type impact: ~azure.servicefabric.models.RepairImpactDescriptionBase - :param result_status: A value describing the overall result of the repair task execution. 
Must - be specified in the Restoring and later states, and is immutable once set. Possible values - include: "Invalid", "Succeeded", "Cancelled", "Interrupted", "Failed", "Pending". + :param result_status: A value describing the overall result of the repair + task execution. Must be specified in the Restoring and later states, and + is immutable once set. Possible values include: 'Invalid', 'Succeeded', + 'Cancelled', 'Interrupted', 'Failed', 'Pending' :type result_status: str or ~azure.servicefabric.models.ResultStatus - :param result_code: A numeric value providing additional details about the result of the repair - task execution. - May be specified in the Restoring and later states, and is immutable once set. + :param result_code: A numeric value providing additional details about the + result of the repair task execution. + May be specified in the Restoring and later states, and is immutable once + set. :type result_code: int - :param result_details: A string providing additional details about the result of the repair - task execution. - May be specified in the Restoring and later states, and is immutable once set. + :param result_details: A string providing additional details about the + result of the repair task execution. + May be specified in the Restoring and later states, and is immutable once + set. :type result_details: str - :param history: An object that contains timestamps of the repair task's state transitions. - These timestamps are updated by the system, and cannot be directly modified. + :param history: An object that contains timestamps of the repair task's + state transitions. + These timestamps are updated by the system, and cannot be directly + modified. :type history: ~azure.servicefabric.models.RepairTaskHistory - :param preparing_health_check_state: The workflow state of the health check when the repair - task is in the Preparing state. Possible values include: "NotStarted", "InProgress", - "Succeeded", "Skipped", "TimedOut". 
+ :param preparing_health_check_state: The workflow state of the health + check when the repair task is in the Preparing state. Possible values + include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' :type preparing_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param restoring_health_check_state: The workflow state of the health check when the repair - task is in the Restoring state. Possible values include: "NotStarted", "InProgress", - "Succeeded", "Skipped", "TimedOut". + :param restoring_health_check_state: The workflow state of the health + check when the repair task is in the Restoring state. Possible values + include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' :type restoring_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param perform_preparing_health_check: A value to determine if health checks will be performed - when the repair task enters the Preparing state. + :param perform_preparing_health_check: A value to determine if health + checks will be performed when the repair task enters the Preparing state. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A value to determine if health checks will be performed - when the repair task enters the Restoring state. + :param perform_restoring_health_check: A value to determine if health + checks will be performed when the repair task enters the Restoring state. 
:type perform_restoring_health_check: bool """ @@ -18067,17 +16782,14 @@ class RepairTask(msrest.serialization.Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTask, self).__init__(**kwargs) - self.task_id = kwargs['task_id'] + self.task_id = kwargs.get('task_id', None) self.version = kwargs.get('version', None) self.description = kwargs.get('description', None) - self.state = kwargs['state'] + self.state = kwargs.get('state', None) self.flags = kwargs.get('flags', None) - self.action = kwargs['action'] + self.action = kwargs.get('action', None) self.target = kwargs.get('target', None) self.executor = kwargs.get('executor', None) self.executor_data = kwargs.get('executor_data', None) @@ -18092,18 +16804,19 @@ def __init__( self.perform_restoring_health_check = kwargs.get('perform_restoring_health_check', None) -class RepairTaskApproveDescription(msrest.serialization.Model): +class RepairTaskApproveDescription(Model): """Describes a request for forced approval of a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. 
:type version: str """ @@ -18116,31 +16829,29 @@ class RepairTaskApproveDescription(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTaskApproveDescription, self).__init__(**kwargs) - self.task_id = kwargs['task_id'] + self.task_id = kwargs.get('task_id', None) self.version = kwargs.get('version', None) -class RepairTaskCancelDescription(msrest.serialization.Model): +class RepairTaskCancelDescription(Model): """Describes a request to cancel a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. :type version: str - :param request_abort: *True* if the repair should be stopped as soon as possible even if it has - already started executing. *False* if the repair should be cancelled only if execution has not - yet started. + :param request_abort: _True_ if the repair should be stopped as soon as + possible even if it has already started executing. _False_ if the repair + should be cancelled only if execution has not yet started. 
:type request_abort: bool """ @@ -18154,28 +16865,27 @@ class RepairTaskCancelDescription(msrest.serialization.Model): 'request_abort': {'key': 'RequestAbort', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTaskCancelDescription, self).__init__(**kwargs) - self.task_id = kwargs['task_id'] + self.task_id = kwargs.get('task_id', None) self.version = kwargs.get('version', None) self.request_abort = kwargs.get('request_abort', None) -class RepairTaskDeleteDescription(msrest.serialization.Model): +class RepairTaskDeleteDescription(Model): """Describes a request to delete a completed repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. - :param task_id: Required. The ID of the completed repair task to be deleted. + :param task_id: Required. The ID of the completed repair task to be + deleted. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. 
:type version: str """ @@ -18188,46 +16898,50 @@ class RepairTaskDeleteDescription(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTaskDeleteDescription, self).__init__(**kwargs) - self.task_id = kwargs['task_id'] + self.task_id = kwargs.get('task_id', None) self.version = kwargs.get('version', None) -class RepairTaskHistory(msrest.serialization.Model): +class RepairTaskHistory(Model): """A record of the times when the repair task entered each state. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. - - :param created_utc_timestamp: The time when the repair task entered the Created state. - :type created_utc_timestamp: ~datetime.datetime - :param claimed_utc_timestamp: The time when the repair task entered the Claimed state. - :type claimed_utc_timestamp: ~datetime.datetime - :param preparing_utc_timestamp: The time when the repair task entered the Preparing state. - :type preparing_utc_timestamp: ~datetime.datetime - :param approved_utc_timestamp: The time when the repair task entered the Approved state. - :type approved_utc_timestamp: ~datetime.datetime - :param executing_utc_timestamp: The time when the repair task entered the Executing state. - :type executing_utc_timestamp: ~datetime.datetime - :param restoring_utc_timestamp: The time when the repair task entered the Restoring state. - :type restoring_utc_timestamp: ~datetime.datetime - :param completed_utc_timestamp: The time when the repair task entered the Completed state. - :type completed_utc_timestamp: ~datetime.datetime - :param preparing_health_check_start_utc_timestamp: The time when the repair task started the - health check in the Preparing state. 
- :type preparing_health_check_start_utc_timestamp: ~datetime.datetime - :param preparing_health_check_end_utc_timestamp: The time when the repair task completed the - health check in the Preparing state. - :type preparing_health_check_end_utc_timestamp: ~datetime.datetime - :param restoring_health_check_start_utc_timestamp: The time when the repair task started the - health check in the Restoring state. - :type restoring_health_check_start_utc_timestamp: ~datetime.datetime - :param restoring_health_check_end_utc_timestamp: The time when the repair task completed the - health check in the Restoring state. - :type restoring_health_check_end_utc_timestamp: ~datetime.datetime + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + :param created_utc_timestamp: The time when the repair task entered the + Created state. + :type created_utc_timestamp: datetime + :param claimed_utc_timestamp: The time when the repair task entered the + Claimed state. + :type claimed_utc_timestamp: datetime + :param preparing_utc_timestamp: The time when the repair task entered the + Preparing state. + :type preparing_utc_timestamp: datetime + :param approved_utc_timestamp: The time when the repair task entered the + Approved state + :type approved_utc_timestamp: datetime + :param executing_utc_timestamp: The time when the repair task entered the + Executing state + :type executing_utc_timestamp: datetime + :param restoring_utc_timestamp: The time when the repair task entered the + Restoring state + :type restoring_utc_timestamp: datetime + :param completed_utc_timestamp: The time when the repair task entered the + Completed state + :type completed_utc_timestamp: datetime + :param preparing_health_check_start_utc_timestamp: The time when the + repair task started the health check in the Preparing state. 
+ :type preparing_health_check_start_utc_timestamp: datetime + :param preparing_health_check_end_utc_timestamp: The time when the repair + task completed the health check in the Preparing state. + :type preparing_health_check_end_utc_timestamp: datetime + :param restoring_health_check_start_utc_timestamp: The time when the + repair task started the health check in the Restoring state. + :type restoring_health_check_start_utc_timestamp: datetime + :param restoring_health_check_end_utc_timestamp: The time when the repair + task completed the health check in the Restoring state. + :type restoring_health_check_end_utc_timestamp: datetime """ _attribute_map = { @@ -18244,10 +16958,7 @@ class RepairTaskHistory(msrest.serialization.Model): 'restoring_health_check_end_utc_timestamp': {'key': 'RestoringHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTaskHistory, self).__init__(**kwargs) self.created_utc_timestamp = kwargs.get('created_utc_timestamp', None) self.claimed_utc_timestamp = kwargs.get('claimed_utc_timestamp', None) @@ -18262,26 +16973,29 @@ def __init__( self.restoring_health_check_end_utc_timestamp = kwargs.get('restoring_health_check_end_utc_timestamp', None) -class RepairTaskUpdateHealthPolicyDescription(msrest.serialization.Model): +class RepairTaskUpdateHealthPolicyDescription(Model): """Describes a request to update the health policy of a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task to be updated. :type task_id: str - :param version: The current version number of the repair task. 
If non-zero, then the request - will only succeed if this value matches the actual current value of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current value of the repair task. If zero, then no version check is + performed. :type version: str - :param perform_preparing_health_check: A boolean indicating if health check is to be performed - in the Preparing stage of the repair task. If not specified the existing value should not be - altered. Otherwise, specify the desired new value. + :param perform_preparing_health_check: A boolean indicating if health + check is to be performed in the Preparing stage of the repair task. If not + specified the existing value should not be altered. Otherwise, specify the + desired new value. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A boolean indicating if health check is to be performed - in the Restoring stage of the repair task. If not specified the existing value should not be - altered. Otherwise, specify the desired new value. + :param perform_restoring_health_check: A boolean indicating if health + check is to be performed in the Restoring stage of the repair task. If not + specified the existing value should not be altered. Otherwise, specify the + desired new value. 
:type perform_restoring_health_check: bool """ @@ -18296,21 +17010,18 @@ class RepairTaskUpdateHealthPolicyDescription(msrest.serialization.Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTaskUpdateHealthPolicyDescription, self).__init__(**kwargs) - self.task_id = kwargs['task_id'] + self.task_id = kwargs.get('task_id', None) self.version = kwargs.get('version', None) self.perform_preparing_health_check = kwargs.get('perform_preparing_health_check', None) self.perform_restoring_health_check = kwargs.get('perform_restoring_health_check', None) -class RepairTaskUpdateInfo(msrest.serialization.Model): +class RepairTaskUpdateInfo(Model): """Describes the result of an operation that created or updated a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. @@ -18326,43 +17037,44 @@ class RepairTaskUpdateInfo(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RepairTaskUpdateInfo, self).__init__(**kwargs) - self.version = kwargs['version'] + self.version = kwargs.get('version', None) class ReplicaHealth(EntityHealth): - """Represents a base class for stateful service replica or stateless service instance health. -Contains the replica aggregated health state, the health events and the unhealthy evaluations. + """Represents a base class for stateful service replica or stateless service + instance health. + Contains the replica aggregated health state, the health events and the + unhealthy evaluations. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: StatefulServiceReplicaHealth, StatelessServiceInstanceHealth. + sub-classes are: StatefulServiceReplicaHealth, + StatelessServiceInstanceHealth All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. 
:type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -18374,60 +17086,58 @@ class ReplicaHealth(EntityHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealth', 'Stateless': 'StatelessServiceInstanceHealth'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaHealth, self).__init__(**kwargs) - self.service_kind = 'ReplicaHealth' # type: str self.partition_id = kwargs.get('partition_id', None) + self.service_kind = None + self.service_kind = 'ReplicaHealth' class ReplicaHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a replica, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. 
Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a replica, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param partition_id: Id of the partition to which the replica belongs. 
:type partition_id: str - :param replica_or_instance_id: Id of a stateful service replica or a stateless service - instance. This ID is used in the queries that apply to both stateful and stateless services. It - is used by Service Fabric to uniquely identify a replica of a partition of a stateful service - or an instance of a stateless service partition. It is unique within a partition and does not - change for the lifetime of the replica or the instance. If a stateful replica gets dropped and - another replica gets created on the same node for the same partition, it will get a different - value for the ID. If a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a + stateless service instance. This ID is used in the queries that apply to + both stateful and stateless services. It is used by Service Fabric to + uniquely identify a replica of a partition of a stateful service or an + instance of a stateless service partition. It is unique within a partition + and does not change for the lifetime of the replica or the instance. If a + stateful replica gets dropped and another replica gets created on the same + node for the same partition, it will get a different value for the ID. If + a stateless instance is failed over on the same or different node it will get a different value for the ID. :type replica_or_instance_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the replica. The types of the unhealthy evaluations can be - EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the replica. The types of the + unhealthy evaluations can be EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -18435,42 +17145,43 @@ class ReplicaHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Replica' # type: str self.partition_id = kwargs.get('partition_id', None) self.replica_or_instance_id = kwargs.get('replica_or_instance_id', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Replica' class ReplicaHealthState(EntityHealthState): - """Represents a base class for stateful service replica or stateless service instance health state. + """Represents a base class for stateful service replica or stateless service + instance health state. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaHealthState, StatelessServiceInstanceHealthState. + sub-classes are: StatefulServiceReplicaHealthState, + StatelessServiceInstanceHealthState All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_kind: Required. 
The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param partition_id: The ID of the partition to which this replica belongs. + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. :type partition_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -18479,38 +17190,40 @@ class ReplicaHealthState(EntityHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaHealthState, self).__init__(**kwargs) - self.service_kind = 'ReplicaHealthState' # type: str self.partition_id = kwargs.get('partition_id', None) + self.service_kind = None + self.service_kind = 'ReplicaHealthState' class ReplicaHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a stateful service replica or a stateless service instance. -The replica health state contains the replica ID and its aggregated health state. - - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + """Represents the health state chunk of a stateful service replica or a + stateless service instance. + The replica health state contains the replica ID and its aggregated health + state. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param replica_or_instance_id: Id of a stateful service replica or a stateless service - instance. This ID is used in the queries that apply to both stateful and stateless services. It - is used by Service Fabric to uniquely identify a replica of a partition of a stateful service - or an instance of a stateless service partition. It is unique within a partition and does not - change for the lifetime of the replica or the instance. If a stateful replica gets dropped and - another replica gets created on the same node for the same partition, it will get a different - value for the ID. If a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a + stateless service instance. This ID is used in the queries that apply to + both stateful and stateless services. It is used by Service Fabric to + uniquely identify a replica of a partition of a stateful service or an + instance of a stateless service partition. It is unique within a partition + and does not change for the lifetime of the replica or the instance. If a + stateful replica gets dropped and another replica gets created on the same + node for the same partition, it will get a different value for the ID. If + a stateless instance is failed over on the same or different node it will get a different value for the ID. 
:type replica_or_instance_id: str """ @@ -18520,19 +17233,17 @@ class ReplicaHealthStateChunk(EntityHealthStateChunk): 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaHealthStateChunk, self).__init__(**kwargs) self.replica_or_instance_id = kwargs.get('replica_or_instance_id', None) -class ReplicaHealthStateChunkList(msrest.serialization.Model): - """The list of replica health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class ReplicaHealthStateChunkList(Model): + """The list of replica health state chunks that respect the input filters in + the chunk query. Returned by get cluster health state chunks query. - :param items: The list of replica health state chunks that respect the input filters in the - chunk query. + :param items: The list of replica health state chunks that respect the + input filters in the chunk query. :type items: list[~azure.servicefabric.models.ReplicaHealthStateChunk] """ @@ -18540,49 +17251,56 @@ class ReplicaHealthStateChunkList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ReplicaHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class ReplicaHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a replica should be included as a child of a partition in the cluster health chunk. -The replicas are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent partition, service and application must be included in the cluster health chunk. -One filter can match zero, one or multiple replicas, depending on its properties. 
- - :param replica_or_instance_id_filter: Id of the stateful service replica or stateless service - instance that matches the filter. The filter is applied only to the specified replica, if it - exists. - If the replica doesn't exist, no replica is returned in the cluster health chunk based on this - filter. - If the replica exists, it is included in the cluster health chunk if it respects the other - filter properties. - If not specified, all replicas that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class ReplicaHealthStateFilter(Model): + """Defines matching criteria to determine whether a replica should be included + as a child of a partition in the cluster health chunk. + The replicas are only returned if the parent entities match a filter + specified in the cluster health chunk query description. The parent + partition, service and application must be included in the cluster health + chunk. + One filter can match zero, one or multiple replicas, depending on its + properties. + + :param replica_or_instance_id_filter: Id of the stateful service replica + or stateless service instance that matches the filter. The filter is + applied only to the specified replica, if it exists. + If the replica doesn't exist, no replica is returned in the cluster health + chunk based on this filter. + If the replica exists, it is included in the cluster health chunk if it + respects the other filter properties. + If not specified, all replicas that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. :type replica_or_instance_id_filter: str - :param health_state_filter: The filter for the health state of the replicas. It allows - selecting replicas if they match the desired health states. - The possible values are integer value of one of the following health states. 
Only replicas - that match the filter are returned. All replicas are used to evaluate the parent partition - aggregated health state. - If not specified, default value is None, unless the replica ID is specified. If the filter has - default value and replica ID is specified, the matching replica is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches replicas with HealthState value of OK (2) - and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + replicas. It allows selecting replicas if they match the desired health + states. + The possible values are integer value of one of the following health + states. Only replicas that match the filter are returned. All replicas are + used to evaluate the parent partition aggregated health state. + If not specified, default value is None, unless the replica ID is + specified. If the filter has default value and replica ID is specified, + the matching replica is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches replicas with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. 
+ - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int """ @@ -18591,39 +17309,38 @@ class ReplicaHealthStateFilter(msrest.serialization.Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaHealthStateFilter, self).__init__(**kwargs) self.replica_or_instance_id_filter = kwargs.get('replica_or_instance_id_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) -class ReplicaInfo(msrest.serialization.Model): - """Information about the identity, status, health, node name, uptime, and other details about the replica. +class ReplicaInfo(Model): + """Information about the identity, status, health, node name, uptime, and + other details about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo. + sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". 
+ :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. :type last_in_build_duration_in_seconds: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -18631,39 +17348,37 @@ class ReplicaInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.replica_status = kwargs.get('replica_status', None) self.health_state = kwargs.get('health_state', None) self.node_name = kwargs.get('node_name', None) self.address = kwargs.get('address', None) self.last_in_build_duration_in_seconds = kwargs.get('last_in_build_duration_in_seconds', None) + self.service_kind = None -class ReplicaLifecycleDescription(msrest.serialization.Model): +class ReplicaLifecycleDescription(Model): """Describes how the replica will behave. - :param is_singleton_replica_move_allowed_during_upgrade: If set to true, replicas with a target - replica set size of 1 will be permitted to move during upgrade. + :param is_singleton_replica_move_allowed_during_upgrade: If set to true, + replicas with a target replica set size of 1 will be permitted to move + during upgrade. :type is_singleton_replica_move_allowed_during_upgrade: bool - :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original - location after upgrade. + :param restore_replica_location_after_upgrade: If set to true, move/swap + replica to original location after upgrade. 
:type restore_replica_location_after_upgrade: bool """ @@ -18672,23 +17387,22 @@ class ReplicaLifecycleDescription(msrest.serialization.Model): 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaLifecycleDescription, self).__init__(**kwargs) self.is_singleton_replica_move_allowed_during_upgrade = kwargs.get('is_singleton_replica_move_allowed_during_upgrade', None) self.restore_replica_location_after_upgrade = kwargs.get('restore_replica_location_after_upgrade', None) -class ReplicaMetricLoadDescription(msrest.serialization.Model): - """Specifies metric loads of a partition's specific secondary replica or instance. +class ReplicaMetricLoadDescription(Model): + """Specifies metric loads of a partition's specific secondary replica or + instance. :param node_name: Node name of a specific secondary replica or instance. :type node_name: str - :param replica_or_instance_load_entries: Loads of a different metrics for a partition's - secondary replica or instance. - :type replica_or_instance_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] + :param replica_or_instance_load_entries: Loads of a different metrics for + a partition's secondary replica or instance. 
+ :type replica_or_instance_load_entries: + list[~azure.servicefabric.models.MetricLoadDescription] """ _attribute_map = { @@ -18696,45 +17410,43 @@ class ReplicaMetricLoadDescription(msrest.serialization.Model): 'replica_or_instance_load_entries': {'key': 'ReplicaOrInstanceLoadEntries', 'type': '[MetricLoadDescription]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicaMetricLoadDescription, self).__init__(**kwargs) self.node_name = kwargs.get('node_name', None) self.replica_or_instance_load_entries = kwargs.get('replica_or_instance_load_entries', None) class ReplicasHealthEvaluation(HealthEvaluation): - """Represents health evaluation for replicas, containing health evaluations for each unhealthy replica that impacted current aggregated health state. Can be returned when evaluating partition health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for replicas, containing health evaluations + for each unhealthy replica that impacted current aggregated health state. + Can be returned when evaluating partition health and the aggregated health + state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_replicas_per_partition: Maximum allowed percentage of unhealthy - replicas per partition from the ApplicationHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_replicas_per_partition: Maximum allowed + percentage of unhealthy replicas per partition from the + ApplicationHealthPolicy. :type max_percent_unhealthy_replicas_per_partition: int - :param total_count: Total number of replicas in the partition from the health store. + :param total_count: Total number of replicas in the partition from the + health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ReplicaHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ReplicaHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -18742,54 +17454,58 @@ class ReplicasHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicasHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Replicas' # type: str self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Replicas' -class ReplicatorQueueStatus(msrest.serialization.Model): - """Provides various statistics of the queue used in the service fabric replicator. -Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. -Depending on the role of the replicator, the properties in this type imply different meanings. +class ReplicatorQueueStatus(Model): + """Provides various statistics of the queue used in the service fabric + replicator. 
+ Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. + Depending on the role of the replicator, the properties in this type imply + different meanings. - :param queue_utilization_percentage: Represents the utilization of the queue. A value of 0 - indicates that the queue is empty and a value of 100 indicates the queue is full. + :param queue_utilization_percentage: Represents the utilization of the + queue. A value of 0 indicates that the queue is empty and a value of 100 + indicates the queue is full. :type queue_utilization_percentage: int - :param queue_memory_size: Represents the virtual memory consumed by the queue in bytes. + :param queue_memory_size: Represents the virtual memory consumed by the + queue in bytes. :type queue_memory_size: str - :param first_sequence_number: On a primary replicator, this is semantically the sequence number - of the operation for which all the secondary replicas have sent an acknowledgement. - On a secondary replicator, this is the smallest sequence number of the operation that is - present in the queue. + :param first_sequence_number: On a primary replicator, this is + semantically the sequence number of the operation for which all the + secondary replicas have sent an acknowledgement. + On a secondary replicator, this is the smallest sequence number of the + operation that is present in the queue. :type first_sequence_number: str - :param completed_sequence_number: On a primary replicator, this is semantically the highest - sequence number of the operation for which all the secondary replicas have sent an - acknowledgement. - On a secondary replicator, this is semantically the highest sequence number that has been - applied to the persistent state. 
+ :param completed_sequence_number: On a primary replicator, this is + semantically the highest sequence number of the operation for which all + the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is semantically the highest sequence + number that has been applied to the persistent state. :type completed_sequence_number: str - :param committed_sequence_number: On a primary replicator, this is semantically the highest - sequence number of the operation for which a write quorum of the secondary replicas have sent - an acknowledgement. - On a secondary replicator, this is semantically the highest sequence number of the in-order - operation received from the primary. + :param committed_sequence_number: On a primary replicator, this is + semantically the highest sequence number of the operation for which a + write quorum of the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is semantically the highest sequence + number of the in-order operation received from the primary. :type committed_sequence_number: str - :param last_sequence_number: Represents the latest sequence number of the operation that is - available in the queue. + :param last_sequence_number: Represents the latest sequence number of the + operation that is available in the queue. 
:type last_sequence_number: str """ @@ -18802,10 +17518,7 @@ class ReplicatorQueueStatus(msrest.serialization.Model): 'last_sequence_number': {'key': 'LastSequenceNumber', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ReplicatorQueueStatus, self).__init__(**kwargs) self.queue_utilization_percentage = kwargs.get('queue_utilization_percentage', None) self.queue_memory_size = kwargs.get('queue_memory_size', None) @@ -18815,14 +17528,16 @@ def __init__( self.last_sequence_number = kwargs.get('last_sequence_number', None) -class ResolvedServiceEndpoint(msrest.serialization.Model): +class ResolvedServiceEndpoint(Model): """Endpoint of a resolved service partition. - :param kind: The role of the replica where the endpoint is reported. Possible values include: - "Invalid", "Stateless", "StatefulPrimary", "StatefulSecondary". + :param kind: The role of the replica where the endpoint is reported. + Possible values include: 'Invalid', 'Stateless', 'StatefulPrimary', + 'StatefulSecondary' :type kind: str or ~azure.servicefabric.models.ServiceEndpointRole - :param address: The address of the endpoint. If the endpoint has multiple listeners the address - is a JSON object with one property per listener with the value as the address of that listener. + :param address: The address of the endpoint. If the endpoint has multiple + listeners the address is a JSON object with one property per listener with + the value as the address of that listener. 
:type address: str """ @@ -18831,29 +17546,30 @@ class ResolvedServiceEndpoint(msrest.serialization.Model): 'address': {'key': 'Address', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResolvedServiceEndpoint, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.address = kwargs.get('address', None) -class ResolvedServicePartition(msrest.serialization.Model): +class ResolvedServicePartition(Model): """Information about a service partition and its associated endpoints. All required parameters must be populated in order to send to Azure. - :param name: Required. The full name of the service with 'fabric:' URI scheme. + :param name: Required. The full name of the service with 'fabric:' URI + scheme. :type name: str - :param partition_information: Required. A representation of the resolved partition. - :type partition_information: ~azure.servicefabric.models.PartitionInformation - :param endpoints: Required. List of resolved service endpoints of a service partition. + :param partition_information: Required. A representation of the resolved + partition. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param endpoints: Required. List of resolved service endpoints of a + service partition. :type endpoints: list[~azure.servicefabric.models.ResolvedServiceEndpoint] - :param version: Required. The version of this resolved service partition result. This version - should be passed in the next time the ResolveService call is made via the PreviousRspVersion - query parameter. + :param version: Required. The version of this resolved service partition + result. This version should be passed in the next time the ResolveService + call is made via the PreviousRspVersion query parameter. 
:type version: str """ @@ -18871,23 +17587,23 @@ class ResolvedServicePartition(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResolvedServicePartition, self).__init__(**kwargs) - self.name = kwargs['name'] - self.partition_information = kwargs['partition_information'] - self.endpoints = kwargs['endpoints'] - self.version = kwargs['version'] + self.name = kwargs.get('name', None) + self.partition_information = kwargs.get('partition_information', None) + self.endpoints = kwargs.get('endpoints', None) + self.version = kwargs.get('version', None) -class ResourceLimits(msrest.serialization.Model): - """This type describes the resource limits for a given container. It describes the most amount of resources a container is allowed to use before being restarted. +class ResourceLimits(Model): + """This type describes the resource limits for a given container. It describes + the most amount of resources a container is allowed to use before being + restarted. :param memory_in_gb: The memory limit in GB. :type memory_in_gb: float - :param cpu: CPU limits in cores. At present, only full cores are supported. + :param cpu: CPU limits in cores. At present, only full cores are + supported. :type cpu: float """ @@ -18896,23 +17612,26 @@ class ResourceLimits(msrest.serialization.Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResourceLimits, self).__init__(**kwargs) self.memory_in_gb = kwargs.get('memory_in_gb', None) self.cpu = kwargs.get('cpu', None) -class ResourceRequests(msrest.serialization.Model): - """This type describes the requested resources for a given container. It describes the least amount of resources required for the container. A container can consume more than requested resources up to the specified limits before being restarted. 
Currently, the requested resources are treated as limits. +class ResourceRequests(Model): + """This type describes the requested resources for a given container. It + describes the least amount of resources required for the container. A + container can consume more than requested resources up to the specified + limits before being restarted. Currently, the requested resources are + treated as limits. All required parameters must be populated in order to send to Azure. - :param memory_in_gb: Required. The memory request in GB for this container. + :param memory_in_gb: Required. The memory request in GB for this + container. :type memory_in_gb: float - :param cpu: Required. Requested number of CPU cores. At present, only full cores are supported. + :param cpu: Required. Requested number of CPU cores. At present, only full + cores are supported. :type cpu: float """ @@ -18926,23 +17645,22 @@ class ResourceRequests(msrest.serialization.Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResourceRequests, self).__init__(**kwargs) - self.memory_in_gb = kwargs['memory_in_gb'] - self.cpu = kwargs['cpu'] + self.memory_in_gb = kwargs.get('memory_in_gb', None) + self.cpu = kwargs.get('cpu', None) -class ResourceRequirements(msrest.serialization.Model): +class ResourceRequirements(Model): """This type describes the resource requirements for a container or a service. All required parameters must be populated in order to send to Azure. - :param requests: Required. Describes the requested resources for a given container. + :param requests: Required. Describes the requested resources for a given + container. :type requests: ~azure.servicefabric.models.ResourceRequests - :param limits: Describes the maximum limits on the resources for a given container. + :param limits: Describes the maximum limits on the resources for a given + container. 
:type limits: ~azure.servicefabric.models.ResourceLimits """ @@ -18955,42 +17673,41 @@ class ResourceRequirements(msrest.serialization.Model): 'limits': {'key': 'limits', 'type': 'ResourceLimits'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResourceRequirements, self).__init__(**kwargs) - self.requests = kwargs['requests'] + self.requests = kwargs.get('requests', None) self.limits = kwargs.get('limits', None) -class RestartDeployedCodePackageDescription(msrest.serialization.Model): - """Defines description for restarting a deployed code package on Service Fabric node. +class RestartDeployedCodePackageDescription(Model): + """Defines description for restarting a deployed code package on Service + Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest that specified this code - package. + :param service_manifest_name: Required. The name of service manifest that + specified this code package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param code_package_name: Required. The name of the code package defined in the service - manifest. + :param code_package_name: Required. The name of the code package defined + in the service manifest. 
:type code_package_name: str - :param code_package_instance_id: Required. The instance ID for currently running entry point. - For a code package setup entry point (if specified) runs first and after it finishes main entry - point is started. - Each time entry point executable is run, its instance ID will change. If 0 is passed in as the - code package instance ID, the API will restart the code package with whatever instance ID it is - currently running. - If an instance ID other than 0 is passed in, the API will restart the code package only if the - current Instance ID matches the passed in instance ID. - Note, passing in the exact instance ID (not 0) in the API is safer, because if ensures at most - one restart of the code package. + :param code_package_instance_id: Required. The instance ID for currently + running entry point. For a code package setup entry point (if specified) + runs first and after it finishes main entry point is started. + Each time entry point executable is run, its instance ID will change. If 0 + is passed in as the code package instance ID, the API will restart the + code package with whatever instance ID it is currently running. + If an instance ID other than 0 is passed in, the API will restart the code + package only if the current Instance ID matches the passed in instance ID. + Note, passing in the exact instance ID (not 0) in the API is safer, + because it ensures at most one restart of the code package. 
:type code_package_instance_id: str """ @@ -19007,30 +17724,30 @@ class RestartDeployedCodePackageDescription(msrest.serialization.Model): 'code_package_instance_id': {'key': 'CodePackageInstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RestartDeployedCodePackageDescription, self).__init__(**kwargs) - self.service_manifest_name = kwargs['service_manifest_name'] + self.service_manifest_name = kwargs.get('service_manifest_name', None) self.service_package_activation_id = kwargs.get('service_package_activation_id', None) - self.code_package_name = kwargs['code_package_name'] - self.code_package_instance_id = kwargs['code_package_instance_id'] + self.code_package_name = kwargs.get('code_package_name', None) + self.code_package_instance_id = kwargs.get('code_package_instance_id', None) -class RestartNodeDescription(msrest.serialization.Model): +class RestartNodeDescription(Model): """Describes the parameters to restart a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param node_instance_id: Required. The instance ID of the target node. If instance ID is - specified the node is restarted only if it matches with the current instance of the node. A - default value of "0" would match any instance ID. The instance ID can be obtained using get - node query. + :param node_instance_id: Required. The instance ID of the target node. If + instance ID is specified the node is restarted only if it matches with the + current instance of the node. A default value of "0" would match any + instance ID. The instance ID can be obtained using get node query. Default + value: "0" . :type node_instance_id: str - :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is - case-sensitive. Possible values include: "False", "True". Default value: "False". 
- :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump + :param create_fabric_dump: Specify True to create a dump of the fabric + node process. This is case-sensitive. Possible values include: 'False', + 'True'. Default value: "False" . + :type create_fabric_dump: str or + ~azure.servicefabric.models.CreateFabricDump """ _validation = { @@ -19042,23 +17759,21 @@ class RestartNodeDescription(msrest.serialization.Model): 'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RestartNodeDescription, self).__init__(**kwargs) self.node_instance_id = kwargs.get('node_instance_id', "0") self.create_fabric_dump = kwargs.get('create_fabric_dump', "False") -class RestartPartitionResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class RestartPartitionResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the partition that the - user-induced operation acted upon. + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -19067,26 +17782,25 @@ class RestartPartitionResult(msrest.serialization.Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RestartPartitionResult, self).__init__(**kwargs) self.error_code = kwargs.get('error_code', None) self.selected_partition = kwargs.get('selected_partition', None) -class RestorePartitionDescription(msrest.serialization.Model): - """Specifies the parameters needed to trigger a restore of a specific partition. +class RestorePartitionDescription(Model): + """Specifies the parameters needed to trigger a restore of a specific + partition. All required parameters must be populated in order to send to Azure. :param backup_id: Required. Unique backup ID. :type backup_id: str - :param backup_location: Required. Location of the backup relative to the backup storage - specified/ configured. + :param backup_location: Required. Location of the backup relative to the + backup storage specified/ configured. :type backup_location: str - :param backup_storage: Location of the backup from where the partition will be restored. + :param backup_storage: Location of the backup from where the partition + will be restored. 
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -19101,29 +17815,29 @@ class RestorePartitionDescription(msrest.serialization.Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RestorePartitionDescription, self).__init__(**kwargs) - self.backup_id = kwargs['backup_id'] - self.backup_location = kwargs['backup_location'] + self.backup_id = kwargs.get('backup_id', None) + self.backup_location = kwargs.get('backup_location', None) self.backup_storage = kwargs.get('backup_storage', None) -class RestoreProgressInfo(msrest.serialization.Model): +class RestoreProgressInfo(Model): """Describes the progress of a restore operation on a partition. - :param restore_state: Represents the current state of the partition restore operation. Possible - values include: "Invalid", "Accepted", "RestoreInProgress", "Success", "Failure", "Timeout". + :param restore_state: Represents the current state of the partition + restore operation. Possible values include: 'Invalid', 'Accepted', + 'RestoreInProgress', 'Success', 'Failure', 'Timeout' :type restore_state: str or ~azure.servicefabric.models.RestoreState :param time_stamp_utc: Timestamp when operation succeeded or failed. - :type time_stamp_utc: ~datetime.datetime - :param restored_epoch: Describes the epoch at which the partition is restored. + :type time_stamp_utc: datetime + :param restored_epoch: Describes the epoch at which the partition is + restored. :type restored_epoch: ~azure.servicefabric.models.Epoch :param restored_lsn: Restored LSN. :type restored_lsn: str - :param failure_error: Denotes the failure encountered in performing restore operation. + :param failure_error: Denotes the failure encountered in performing + restore operation. 
:type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -19135,10 +17849,7 @@ class RestoreProgressInfo(msrest.serialization.Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RestoreProgressInfo, self).__init__(**kwargs) self.restore_state = kwargs.get('restore_state', None) self.time_stamp_utc = kwargs.get('time_stamp_utc', None) @@ -19147,13 +17858,14 @@ def __init__( self.failure_error = kwargs.get('failure_error', None) -class ResumeApplicationUpgradeDescription(msrest.serialization.Model): - """Describes the parameters for resuming an unmonitored manual Service Fabric application upgrade. +class ResumeApplicationUpgradeDescription(Model): + """Describes the parameters for resuming an unmonitored manual Service Fabric + application upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain_name: Required. The name of the upgrade domain in which to resume the - upgrade. + :param upgrade_domain_name: Required. The name of the upgrade domain in + which to resume the upgrade. :type upgrade_domain_name: str """ @@ -19165,20 +17877,18 @@ class ResumeApplicationUpgradeDescription(msrest.serialization.Model): 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResumeApplicationUpgradeDescription, self).__init__(**kwargs) - self.upgrade_domain_name = kwargs['upgrade_domain_name'] + self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) -class ResumeClusterUpgradeDescription(msrest.serialization.Model): +class ResumeClusterUpgradeDescription(Model): """Describes the parameters for resuming a cluster upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain: Required. The next upgrade domain for this cluster upgrade. + :param upgrade_domain: Required. 
The next upgrade domain for this cluster + upgrade. :type upgrade_domain: str """ @@ -19190,74 +17900,83 @@ class ResumeClusterUpgradeDescription(msrest.serialization.Model): 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ResumeClusterUpgradeDescription, self).__init__(**kwargs) - self.upgrade_domain = kwargs['upgrade_domain'] + self.upgrade_domain = kwargs.get('upgrade_domain', None) -class RollingUpgradeUpdateDescription(msrest.serialization.Model): - """Describes the parameters for updating a rolling upgrade of application or cluster. +class RollingUpgradeUpdateDescription(Model): + """Describes the parameters for updating a rolling upgrade of application or + cluster. All required parameters must be populated in order to send to Azure. - :param rolling_upgrade_mode: Required. The mode used to monitor health during a rolling - upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values - include: "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: Required. The mode used to monitor health + during a rolling upgrade. The values are UnmonitoredAuto, + UnmonitoredManual, and Monitored. Possible values include: 'Invalid', + 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: + "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). 
:type force_restart: bool - :param replica_set_check_timeout_in_milliseconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param replica_set_check_timeout_in_milliseconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type replica_set_check_timeout_in_milliseconds: long - :param failure_action: The compensating action to perform when a Monitored upgrade encounters - monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will - start rolling back automatically. - Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible - values include: "Invalid", "Rollback", "Manual". + :param failure_action: The compensating action to perform when a Monitored + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. 
Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing - an upgrade domain before applying health policies. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to + wait after completing an upgrade domain before applying health policies. + It is first interpreted as a string representing an ISO 8601 duration. If + that fails, then it is interpreted as a number representing the total + number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time that the application or - cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time + that the application or cluster must remain healthy before the upgrade + proceeds to the next upgrade domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health - evaluation when the application or cluster is unhealthy before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. 
+ :param health_check_retry_timeout_in_milliseconds: The amount of time to + retry health evaluation when the application or cluster is unhealthy + before FailureAction is executed. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete - before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 - duration. If that fails, then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall + upgrade has to complete before FailureAction is executed. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, + then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to - complete before FailureAction is executed. It is first interpreted as a string representing an - ISO 8601 duration. If that fails, then it is interpreted as a number representing the total - number of milliseconds. + :param upgrade_domain_timeout_in_milliseconds: The amount of time each + upgrade domain has to complete before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of + milliseconds. :type upgrade_domain_timeout_in_milliseconds: str - :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a - stateless instance is closed, to allow the active requests to drain gracefully. 
This would be - effective when the instance is closing during the application/cluster - upgrade, only for those instances which have a non-zero delay duration configured in the - service description. See InstanceCloseDelayDurationSeconds property in $ref: + :param instance_close_delay_duration_in_seconds: Duration in seconds, to + wait before a stateless instance is closed, to allow the active requests + to drain gracefully. This would be effective when the instance is closing + during the application/cluster + upgrade, only for those instances which have a non-zero delay duration + configured in the service description. See + InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates - that the behavior will entirely depend on the delay configured in the stateless service - description. + Note, the default value of InstanceCloseDelayDurationInSeconds is + 4294967295, which indicates that the behavior will entirely depend on the + delay configured in the stateless service description. 
:type instance_close_delay_duration_in_seconds: long """ @@ -19278,33 +17997,34 @@ class RollingUpgradeUpdateDescription(msrest.serialization.Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RollingUpgradeUpdateDescription, self).__init__(**kwargs) self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.force_restart = kwargs.get('force_restart', False) - self.replica_set_check_timeout_in_milliseconds = kwargs.get('replica_set_check_timeout_in_milliseconds', 42949672925) + self.force_restart = kwargs.get('force_restart', None) + self.replica_set_check_timeout_in_milliseconds = kwargs.get('replica_set_check_timeout_in_milliseconds', None) self.failure_action = kwargs.get('failure_action', None) - self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', "0") - self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', "PT0H2M0S") - self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', "PT0H10M0S") - self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") - self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', "P10675199DT02H48M05.4775807S") - self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', 4294967295) + self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', None) + self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', None) + self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', None) + 
self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', None) + self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', None) + self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', None) class RunToCompletionExecutionPolicy(ExecutionPolicy): - """The run to completion execution policy, the service will perform its desired operation and complete successfully. If the service encounters failure, it will restarted based on restart policy specified. If the service completes its operation successfully, it will not be restarted again. + """The run to completion execution policy, the service will perform its + desired operation and complete successfully. If the service encounters + failure, it will be restarted based on restart policy specified. If the + service completes its operation successfully, it will not be restarted + again. All required parameters must be populated in order to send to Azure. - :param type: Required. Enumerates the execution policy types for services.Constant filled by - server. Possible values include: "Default", "RunToCompletion". - :type type: str or ~azure.servicefabric.models.ExecutionPolicyType - :param restart: Required. Enumerates the restart policy for RunToCompletionExecutionPolicy. - Possible values include: "OnFailure", "Never". 
Possible values include: 'OnFailure', + 'Never' :type restart: str or ~azure.servicefabric.models.RestartPolicy """ @@ -19318,21 +18038,20 @@ class RunToCompletionExecutionPolicy(ExecutionPolicy): 'restart': {'key': 'restart', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(RunToCompletionExecutionPolicy, self).__init__(**kwargs) - self.type = 'RunToCompletion' # type: str - self.restart = kwargs['restart'] + self.restart = kwargs.get('restart', None) + self.type = 'RunToCompletion' -class SafetyCheckWrapper(msrest.serialization.Model): - """A wrapper for the safety check object. Safety checks are performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. +class SafetyCheckWrapper(Model): + """A wrapper for the safety check object. Safety checks are performed by + service fabric before continuing with the operations. These checks ensure + the availability of the service and the reliability of the state. - :param safety_check: Represents a safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. + :param safety_check: Represents a safety check performed by service fabric + before continuing with the operations. These checks ensure the + availability of the service and the reliability of the state. 
:type safety_check: ~azure.servicefabric.models.SafetyCheck """ @@ -19340,24 +18059,24 @@ class SafetyCheckWrapper(msrest.serialization.Model): 'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SafetyCheckWrapper, self).__init__(**kwargs) self.safety_check = kwargs.get('safety_check', None) -class ScalingPolicyDescription(msrest.serialization.Model): +class ScalingPolicyDescription(Model): """Describes how the scaling should be performed. All required parameters must be populated in order to send to Azure. - :param scaling_trigger: Required. Specifies the trigger associated with this scaling policy. - :type scaling_trigger: ~azure.servicefabric.models.ScalingTriggerDescription - :param scaling_mechanism: Required. Specifies the mechanism associated with this scaling - policy. - :type scaling_mechanism: ~azure.servicefabric.models.ScalingMechanismDescription + :param scaling_trigger: Required. Specifies the trigger associated with + this scaling policy + :type scaling_trigger: + ~azure.servicefabric.models.ScalingTriggerDescription + :param scaling_mechanism: Required. 
Specifies the mechanism associated + with this scaling policy + :type scaling_mechanism: + ~azure.servicefabric.models.ScalingMechanismDescription """ _validation = { @@ -19370,47 +18089,49 @@ class ScalingPolicyDescription(msrest.serialization.Model): 'scaling_mechanism': {'key': 'ScalingMechanism', 'type': 'ScalingMechanismDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ScalingPolicyDescription, self).__init__(**kwargs) - self.scaling_trigger = kwargs['scaling_trigger'] - self.scaling_mechanism = kwargs['scaling_mechanism'] + self.scaling_trigger = kwargs.get('scaling_trigger', None) + self.scaling_mechanism = kwargs.get('scaling_mechanism', None) class SecondaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is functioning in a ActiveSecondary role. + """Provides statistics about the Service Fabric Replicator, when it is + functioning in a ActiveSecondary role. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecondaryActiveReplicatorStatus, SecondaryIdleReplicatorStatus. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the secondary - replicator. - :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a - replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation message was never - received. 
- :type last_replication_operation_received_time_utc: ~datetime.datetime - :param is_in_build: Value that indicates whether the replica is currently being built. + sub-classes are: SecondaryActiveReplicatorStatus, + SecondaryIdleReplicatorStatus + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary replicator. + :param copy_queue_status: Details about the copy queue on the secondary + replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy - operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation message was never - received. - :type last_copy_operation_received_time_utc: ~datetime.datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment - was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. - :type last_acknowledgement_sent_time_utc: ~datetime.datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. 
+ UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime """ _validation = { @@ -19431,49 +18152,50 @@ class SecondaryReplicatorStatus(ReplicatorStatus): 'kind': {'ActiveSecondary': 'SecondaryActiveReplicatorStatus', 'IdleSecondary': 'SecondaryIdleReplicatorStatus'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecondaryReplicatorStatus, self).__init__(**kwargs) - self.kind = 'SecondaryReplicatorStatus' # type: str self.replication_queue_status = kwargs.get('replication_queue_status', None) self.last_replication_operation_received_time_utc = kwargs.get('last_replication_operation_received_time_utc', None) self.is_in_build = kwargs.get('is_in_build', None) self.copy_queue_status = kwargs.get('copy_queue_status', None) self.last_copy_operation_received_time_utc = kwargs.get('last_copy_operation_received_time_utc', None) self.last_acknowledgement_sent_time_utc = kwargs.get('last_acknowledgement_sent_time_utc', None) + self.kind = 'SecondaryReplicatorStatus' class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in active mode and is part of the replica set. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the secondary - replicator. 
- :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a - replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation message was never - received. - :type last_replication_operation_received_time_utc: ~datetime.datetime - :param is_in_build: Value that indicates whether the replica is currently being built. + """Status of the secondary replicator when it is in active mode and is part of + the replica set. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary replicator. + :param copy_queue_status: Details about the copy queue on the secondary + replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy - operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation message was never - received. 
- :type last_copy_operation_received_time_utc: ~datetime.datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment - was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. - :type last_acknowledgement_sent_time_utc: ~datetime.datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime """ _validation = { @@ -19490,43 +18212,44 @@ class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecondaryActiveReplicatorStatus, self).__init__(**kwargs) - self.kind = 'ActiveSecondary' # type: str + self.kind = 'ActiveSecondary' class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in idle mode and is being built by the primary. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the secondary - replicator. 
- :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a - replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation message was never - received. - :type last_replication_operation_received_time_utc: ~datetime.datetime - :param is_in_build: Value that indicates whether the replica is currently being built. + """Status of the secondary replicator when it is in idle mode and is being + built by the primary. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary replicator. + :param copy_queue_status: Details about the copy queue on the secondary + replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy - operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation message was never - received. 
- :type last_copy_operation_received_time_utc: ~datetime.datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment - was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. - :type last_acknowledgement_sent_time_utc: ~datetime.datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime """ _validation = { @@ -19543,20 +18266,18 @@ class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecondaryIdleReplicatorStatus, self).__init__(**kwargs) - self.kind = 'IdleSecondary' # type: str + self.kind = 'IdleSecondary' -class SecretResourceDescription(msrest.serialization.Model): +class SecretResourceDescription(Model): """This type describes a secret resource. All required parameters must be populated in order to send to Azure. - :param properties: Required. Describes the properties of a secret resource. + :param properties: Required. Describes the properties of a secret + resource. :type properties: ~azure.servicefabric.models.SecretResourceProperties :param name: Required. Name of the Secret resource. 
:type name: str @@ -19572,16 +18293,13 @@ class SecretResourceDescription(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecretResourceDescription, self).__init__(**kwargs) - self.properties = kwargs['properties'] - self.name = kwargs['name'] + self.properties = kwargs.get('properties', None) + self.name = kwargs.get('name', None) -class SecretValue(msrest.serialization.Model): +class SecretValue(Model): """This type represents the unencrypted value of the secret. :param value: The actual value of the secret. @@ -19592,15 +18310,12 @@ class SecretValue(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecretValue, self).__init__(**kwargs) self.value = kwargs.get('value', None) -class SecretValueProperties(msrest.serialization.Model): +class SecretValueProperties(Model): """This type describes properties of secret value resource. :param value: The actual value of the secret. @@ -19611,16 +18326,14 @@ class SecretValueProperties(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecretValueProperties, self).__init__(**kwargs) self.value = kwargs.get('value', None) -class SecretValueResourceDescription(msrest.serialization.Model): - """This type describes a value of a secret resource. The name of this resource is the version identifier corresponding to this secret value. +class SecretValueResourceDescription(Model): + """This type describes a value of a secret resource. The name of this resource + is the version identifier corresponding to this secret value. All required parameters must be populated in order to send to Azure. 
@@ -19639,44 +18352,20 @@ class SecretValueResourceDescription(msrest.serialization.Model): 'value': {'key': 'properties.value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SecretValueResourceDescription, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) -class SecretValueResourceProperties(SecretValueProperties): - """This type describes properties of a secret value resource. - - :param value: The actual value of the secret. - :type value: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SecretValueResourceProperties, self).__init__(**kwargs) - - class SeedNodeSafetyCheck(SafetyCheck): - """Represents a safety check for the seed nodes being performed by service fabric before continuing with node level operations. + """Represents a safety check for the seed nodes being performed by service + fabric before continuing with node level operations. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -19687,23 +18376,22 @@ class SeedNodeSafetyCheck(SafetyCheck): 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SeedNodeSafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsureSeedNodeQuorum' # type: str + self.kind = 'EnsureSeedNodeQuorum' -class SelectedPartition(msrest.serialization.Model): - """This class returns information about the partition that the user-induced operation acted upon. +class SelectedPartition(Model): + """This class returns information about the partition that the user-induced + operation acted upon. :param service_name: The name of the service the partition belongs to. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. 
:type partition_id: str """ @@ -19712,33 +18400,33 @@ class SelectedPartition(msrest.serialization.Model): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SelectedPartition, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) class ServiceBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric service specifying what backup policy is being applied and suspend description, if any. + """Backup configuration information for a specific Service Fabric service + specifying what backup policy is being applied and suspend description, if + any. All required parameters must be populated in order to send to Azure. - :param kind: Required. The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". - :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. 
Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str """ @@ -19747,20 +18435,17 @@ class ServiceBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceBackupConfigurationInfo, self).__init__(**kwargs) - self.kind = 'Service' # type: str self.service_name = kwargs.get('service_name', None) + self.kind = 'Service' class ServiceBackupEntity(BackupEntity): @@ -19768,11 +18453,10 @@ class ServiceBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param entity_kind: Required. Constant filled by server. 
+ :type entity_kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str """ @@ -19785,26 +18469,24 @@ class ServiceBackupEntity(BackupEntity): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceBackupEntity, self).__init__(**kwargs) - self.entity_kind = 'Service' # type: str self.service_name = kwargs.get('service_name', None) + self.entity_kind = 'Service' -class ServiceCorrelationDescription(msrest.serialization.Model): +class ServiceCorrelationDescription(Model): """Creates a particular correlation between services. All required parameters must be populated in order to send to Azure. - :param scheme: Required. The ServiceCorrelationScheme which describes the relationship between - this service and the service specified via ServiceName. Possible values include: "Invalid", - "Affinity", "AlignedAffinity", "NonAlignedAffinity". + :param scheme: Required. The ServiceCorrelationScheme which describes the + relationship between this service and the service specified via + ServiceName. Possible values include: 'Invalid', 'Affinity', + 'AlignedAffinity', 'NonAlignedAffinity' :type scheme: str or ~azure.servicefabric.models.ServiceCorrelationScheme - :param service_name: Required. The name of the service that the correlation relationship is - established with. + :param service_name: Required. The name of the service that the + correlation relationship is established with. 
:type service_name: str """ @@ -19818,91 +18500,67 @@ class ServiceCorrelationDescription(msrest.serialization.Model): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceCorrelationDescription, self).__init__(**kwargs) - self.scheme = kwargs['scheme'] - self.service_name = kwargs['service_name'] + self.scheme = kwargs.get('scheme', None) + self.service_name = kwargs.get('service_name', None) class ServiceEvent(FabricEvent): """Represents the base for all Service Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, ServiceHealthReportExpiredEvent, ServiceNewHealthReportEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - 
"DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, + ServiceNewHealthReportEvent, ServiceHealthReportExpiredEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. 
+ :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent'} + 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceEvent, self).__init__(**kwargs) - self.kind = 'ServiceEvent' # type: str - self.service_id = kwargs['service_id'] + self.service_id = kwargs.get('service_id', None) + self.kind = 'ServiceEvent' class ServiceCreatedEvent(ServiceEvent): @@ -19910,44 +18568,25 @@ class ServiceCreatedEvent(ServiceEvent): All required parameters must be 
populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. 
The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param service_type_name: Required. Service type name. :type service_type_name: str @@ -19967,17 +18606,18 @@ class ServiceCreatedEvent(ServiceEvent): :type min_replica_set_size: int :param service_package_version: Required. Version of Service package. :type service_package_version: str - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. 
The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -19992,11 +18632,11 @@ class ServiceCreatedEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -20010,22 +18650,19 @@ class ServiceCreatedEvent(ServiceEvent): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceCreatedEvent, self).__init__(**kwargs) - self.kind = 'ServiceCreated' # type: str - self.service_type_name = kwargs['service_type_name'] - self.application_name = kwargs['application_name'] - self.application_type_name = kwargs['application_type_name'] - self.service_instance = kwargs['service_instance'] - self.is_stateful = kwargs['is_stateful'] - self.partition_count = 
kwargs['partition_count'] - self.target_replica_set_size = kwargs['target_replica_set_size'] - self.min_replica_set_size = kwargs['min_replica_set_size'] - self.service_package_version = kwargs['service_package_version'] - self.partition_id = kwargs['partition_id'] + self.service_type_name = kwargs.get('service_type_name', None) + self.application_name = kwargs.get('application_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.service_instance = kwargs.get('service_instance', None) + self.is_stateful = kwargs.get('is_stateful', None) + self.partition_count = kwargs.get('partition_count', None) + self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.service_package_version = kwargs.get('service_package_version', None) + self.partition_id = kwargs.get('partition_id', None) + self.kind = 'ServiceCreated' class ServiceDeletedEvent(ServiceEvent): @@ -20033,44 +18670,25 @@ class ServiceDeletedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param service_type_name: Required. Service type name. 
:type service_type_name: str @@ -20093,9 +18711,9 @@ class ServiceDeletedEvent(ServiceEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -20109,11 +18727,11 @@ class ServiceDeletedEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -20126,87 +18744,96 @@ class ServiceDeletedEvent(ServiceEvent): 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceDeletedEvent, self).__init__(**kwargs) - self.kind = 'ServiceDeleted' # type: str - self.service_type_name = kwargs['service_type_name'] - self.application_name = kwargs['application_name'] - self.application_type_name = kwargs['application_type_name'] - self.service_instance = kwargs['service_instance'] - self.is_stateful = kwargs['is_stateful'] - self.partition_count = kwargs['partition_count'] - self.target_replica_set_size = kwargs['target_replica_set_size'] - self.min_replica_set_size = kwargs['min_replica_set_size'] - self.service_package_version = kwargs['service_package_version'] + self.service_type_name = kwargs.get('service_type_name', None) + self.application_name = kwargs.get('application_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + 
self.service_instance = kwargs.get('service_instance', None) + self.is_stateful = kwargs.get('is_stateful', None) + self.partition_count = kwargs.get('partition_count', None) + self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.service_package_version = kwargs.get('service_package_version', None) + self.kind = 'ServiceDeleted' -class ServiceDescription(msrest.serialization.Model): - """A ServiceDescription contains all of the information necessary to create a service. +class ServiceDescription(Model): + """A ServiceDescription contains all of the information necessary to create a + service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceDescription, StatelessServiceDescription. + sub-classes are: StatefulServiceDescription, StatelessServiceDescription All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. 
Initialization data - is passed to service instances or replicas when they are created. + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an object. - :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param tags_required_to_place: Tags for placement of this service. - :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_place: + ~azure.servicefabric.models.NodeTagsDescription :param tags_required_to_run: Tags for running of this service. 
- :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_run: + ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { - 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, + 'service_kind': {'required': True}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -20223,23 +18850,20 @@ class ServiceDescription(msrest.serialization.Model): 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceDescription', 'Stateless': 'StatelessServiceDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceDescription, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.application_name = kwargs.get('application_name', None) - self.service_name = kwargs['service_name'] - self.service_type_name = kwargs['service_type_name'] + self.service_name = kwargs.get('service_name', None) + self.service_type_name = kwargs.get('service_type_name', None) self.initialization_data = kwargs.get('initialization_data', None) - self.partition_description = kwargs['partition_description'] + self.partition_description = kwargs.get('partition_description', None) self.placement_constraints = kwargs.get('placement_constraints', None) self.correlation_scheme = 
kwargs.get('correlation_scheme', None) self.service_load_metrics = kwargs.get('service_load_metrics', None) @@ -20251,29 +18875,34 @@ def __init__( self.scaling_policies = kwargs.get('scaling_policies', None) self.tags_required_to_place = kwargs.get('tags_required_to_place', None) self.tags_required_to_run = kwargs.get('tags_required_to_run', None) + self.service_kind = None -class ServiceFromTemplateDescription(msrest.serialization.Model): - """Defines description for creating a Service Fabric service from a template defined in the application manifest. +class ServiceFromTemplateDescription(Model): + """Defines description for creating a Service Fabric service from a template + defined in the application manifest. All required parameters must be populated in order to send to Azure. - :param application_name: Required. The name of the application, including the 'fabric:' URI - scheme. + :param application_name: Required. The name of the application, including + the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data for the newly created service instance. + :param initialization_data: The initialization data for the newly created + service instance. :type initialization_data: list[int] - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. 
Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str """ @@ -20292,14 +18921,11 @@ class ServiceFromTemplateDescription(msrest.serialization.Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceFromTemplateDescription, self).__init__(**kwargs) - self.application_name = kwargs['application_name'] - self.service_name = kwargs['service_name'] - self.service_type_name = kwargs['service_type_name'] + self.application_name = kwargs.get('application_name', None) + self.service_name = kwargs.get('service_name', None) + self.service_type_name = kwargs.get('service_type_name', None) self.initialization_data = kwargs.get('initialization_data', None) self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None) self.service_dns_name = kwargs.get('service_dns_name', None) @@ -20308,26 +18934,30 @@ def __init__( class ServiceHealth(EntityHealth): """Information about the health of a Service Fabric service. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the service whose health information is described by this object. + :param name: The name of the service whose health information is described + by this object. :type name: str - :param partition_health_states: The list of partition health states associated with the - service. - :type partition_health_states: list[~azure.servicefabric.models.PartitionHealthState] + :param partition_health_states: The list of partition health states + associated with the service. 
+ :type partition_health_states: + list[~azure.servicefabric.models.PartitionHealthState] """ _attribute_map = { @@ -20339,43 +18969,40 @@ class ServiceHealth(EntityHealth): 'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealth, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.partition_health_states = kwargs.get('partition_health_states', None) class ServiceHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a service, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a service, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param service_name: Name of the service whose health evaluation is described by this object. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: Name of the service whose health evaluation is + described by this object. :type service_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the service. The types of the unhealthy evaluations can be - PartitionsHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the service. The types of the + unhealthy evaluations can be PartitionsHealthEvaluation or + EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -20383,21 +19010,18 @@ class ServiceHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Service' # type: str self.service_name = kwargs.get('service_name', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Service' class ServiceHealthReportExpiredEvent(ServiceEvent): @@ -20405,44 +19029,25 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param instance_id: Required. Id of Service instance. :type instance_id: long @@ -20458,16 +19063,17 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -20481,11 +19087,11 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -20498,31 +19104,32 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'ServiceHealthReportExpired' # type: str - self.instance_id = kwargs['instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.instance_id = kwargs.get('instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = 
kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ServiceHealthReportExpired' class ServiceHealthState(EntityHealthState): - """Represents the health state of a service, which contains the service identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_name: Name of the service whose health state is represented by this object. + """Represents the health state of a service, which contains the service + identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param service_name: Name of the service whose health state is represented + by this object. :type service_name: str """ @@ -20531,27 +19138,28 @@ class ServiceHealthState(EntityHealthState): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealthState, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) class ServiceHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a service, which contains the service name, its aggregated health state and any partitions that respect the filters in the cluster health chunk query description. 
+ """Represents the health state chunk of a service, which contains the service + name, its aggregated health state and any partitions that respect the + filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_name: The name of the service whose health state chunk is provided in this - object. + :param service_name: The name of the service whose health state chunk is + provided in this object. :type service_name: str - :param partition_health_state_chunks: The list of partition health state chunks belonging to - the service that respect the filters in the cluster health chunk query description. - :type partition_health_state_chunks: ~azure.servicefabric.models.PartitionHealthStateChunkList + :param partition_health_state_chunks: The list of partition health state + chunks belonging to the service that respect the filters in the cluster + health chunk query description. 
+ :type partition_health_state_chunks: + ~azure.servicefabric.models.PartitionHealthStateChunkList """ _attribute_map = { @@ -20560,20 +19168,18 @@ class ServiceHealthStateChunk(EntityHealthStateChunk): 'partition_health_state_chunks': {'key': 'PartitionHealthStateChunks', 'type': 'PartitionHealthStateChunkList'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealthStateChunk, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.partition_health_state_chunks = kwargs.get('partition_health_state_chunks', None) -class ServiceHealthStateChunkList(msrest.serialization.Model): - """The list of service health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class ServiceHealthStateChunkList(Model): + """The list of service health state chunks that respect the input filters in + the chunk query. Returned by get cluster health state chunks query. - :param items: The list of service health state chunks that respect the input filters in the - chunk query. + :param items: The list of service health state chunks that respect the + input filters in the chunk query. :type items: list[~azure.servicefabric.models.ServiceHealthStateChunk] """ @@ -20581,58 +19187,67 @@ class ServiceHealthStateChunkList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceHealthStateChunk]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealthStateChunkList, self).__init__(**kwargs) self.items = kwargs.get('items', None) -class ServiceHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a service should be included as a child of an application in the cluster health chunk. -The services are only returned if the parent application matches a filter specified in the cluster health chunk query description. 
-One filter can match zero, one or multiple services, depending on its properties. - - :param service_name_filter: The name of the service that matches the filter. The filter is - applied only to the specified service, if it exists. - If the service doesn't exist, no service is returned in the cluster health chunk based on this - filter. - If the service exists, it is included as the application's child if the health state matches - the other filter properties. - If not specified, all services that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class ServiceHealthStateFilter(Model): + """Defines matching criteria to determine whether a service should be included + as a child of an application in the cluster health chunk. + The services are only returned if the parent application matches a filter + specified in the cluster health chunk query description. + One filter can match zero, one or multiple services, depending on its + properties. + + :param service_name_filter: The name of the service that matches the + filter. The filter is applied only to the specified service, if it exists. + If the service doesn't exist, no service is returned in the cluster health + chunk based on this filter. + If the service exists, it is included as the application's child if the + health state matches the other filter properties. + If not specified, all services that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. :type service_name_filter: str - :param health_state_filter: The filter for the health state of the services. It allows - selecting services if they match the desired health states. - The possible values are integer value of one of the following health states. Only services - that match the filter are returned. All services are used to evaluate the cluster aggregated - health state. 
- If not specified, default value is None, unless the service name is specified. If the filter - has default value and service name is specified, the matching service is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches services with HealthState value of OK (2) - and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + services. It allows selecting services if they match the desired health + states. + The possible values are integer value of one of the following health + states. Only services that match the filter are returned. All services are + used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the service name is + specified. If the filter has default value and service name is specified, + the matching service is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches services with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. 
The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param partition_filters: Defines a list of filters that specify which partitions to be - included in the returned cluster health chunk as children of the service. The partitions are - returned only if the parent service matches a filter. - If the list is empty, no partitions are returned. All the partitions are used to evaluate the - parent service aggregated health state, regardless of the input filters. + :param partition_filters: Defines a list of filters that specify which + partitions to be included in the returned cluster health chunk as children + of the service. The partitions are returned only if the parent service + matches a filter. + If the list is empty, no partitions are returned. All the partitions are + used to evaluate the parent service aggregated health state, regardless of + the input filters. The service filter may specify multiple partition filters. - For example, it can specify a filter to return all partitions with health state Error and - another filter to always include a partition identified by its partition ID. - :type partition_filters: list[~azure.servicefabric.models.PartitionHealthStateFilter] + For example, it can specify a filter to return all partitions with health + state Error and another filter to always include a partition identified by + its partition ID. 
+ :type partition_filters: + list[~azure.servicefabric.models.PartitionHealthStateFilter] """ _attribute_map = { @@ -20641,17 +19256,14 @@ class ServiceHealthStateFilter(msrest.serialization.Model): 'partition_filters': {'key': 'PartitionFilters', 'type': '[PartitionHealthStateFilter]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceHealthStateFilter, self).__init__(**kwargs) self.service_name_filter = kwargs.get('service_name_filter', None) self.health_state_filter = kwargs.get('health_state_filter', 0) self.partition_filters = kwargs.get('partition_filters', None) -class ServiceIdentity(msrest.serialization.Model): +class ServiceIdentity(Model): """Map service identity friendly name to an application identity. :param name: The identity friendly name. @@ -20665,48 +19277,47 @@ class ServiceIdentity(msrest.serialization.Model): 'identity_ref': {'key': 'identityRef', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceIdentity, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.identity_ref = kwargs.get('identity_ref', None) -class ServiceInfo(msrest.serialization.Model): +class ServiceInfo(Model): """Information about a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceInfo, StatelessServiceInfo. + sub-classes are: StatefulServiceInfo, StatelessServiceInfo All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. 
This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service manifest. + :param type_name: Name of the service type as specified in the service + manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values include: "Unknown", - "Active", "Upgrading", "Deleting", "Creating", "Failed". + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool + :param service_kind: Required. 
Constant filled by server. + :type service_kind: str """ _validation = { @@ -20715,54 +19326,55 @@ class ServiceInfo(msrest.serialization.Model): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceInfo', 'Stateless': 'StatelessServiceInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) - self.service_kind = None # type: Optional[str] self.name = kwargs.get('name', None) self.type_name = kwargs.get('type_name', None) self.manifest_version = kwargs.get('manifest_version', None) self.health_state = kwargs.get('health_state', None) self.service_status = kwargs.get('service_status', None) self.is_service_group = kwargs.get('is_service_group', None) + self.service_kind = None -class ServiceLoadMetricDescription(msrest.serialization.Model): +class ServiceLoadMetricDescription(Model): """Specifies a metric to load balance a service during runtime. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the metric. If the service chooses to report load during - runtime, the load metric name should match the name that is specified in Name exactly. Note - that metric names are case-sensitive. + :param name: Required. The name of the metric. If the service chooses to + report load during runtime, the load metric name should match the name + that is specified in Name exactly. Note that metric names are + case-sensitive. 
:type name: str - :param weight: The service load metric relative weight, compared to other metrics configured - for this service, as a number. Possible values include: "Zero", "Low", "Medium", "High". + :param weight: The service load metric relative weight, compared to other + metrics configured for this service, as a number. Possible values include: + 'Zero', 'Low', 'Medium', 'High' :type weight: str or ~azure.servicefabric.models.ServiceLoadMetricWeight - :param primary_default_load: Used only for Stateful services. The default amount of load, as a - number, that this service creates for this metric when it is a Primary replica. + :param primary_default_load: Used only for Stateful services. The default + amount of load, as a number, that this service creates for this metric + when it is a Primary replica. :type primary_default_load: int - :param secondary_default_load: Used only for Stateful services. The default amount of load, as - a number, that this service creates for this metric when it is a Secondary replica. + :param secondary_default_load: Used only for Stateful services. The + default amount of load, as a number, that this service creates for this + metric when it is a Secondary replica. :type secondary_default_load: int - :param default_load: Used only for Stateless services. The default amount of load, as a number, - that this service creates for this metric. + :param default_load: Used only for Stateless services. The default amount + of load, as a number, that this service creates for this metric. 
:type default_load: int """ @@ -20778,27 +19390,25 @@ class ServiceLoadMetricDescription(msrest.serialization.Model): 'default_load': {'key': 'DefaultLoad', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceLoadMetricDescription, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.weight = kwargs.get('weight', None) self.primary_default_load = kwargs.get('primary_default_load', None) self.secondary_default_load = kwargs.get('secondary_default_load', None) self.default_load = kwargs.get('default_load', None) -class ServiceNameInfo(msrest.serialization.Model): +class ServiceNameInfo(Model): """Information about the service name. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str @@ -20809,10 +19419,7 @@ class ServiceNameInfo(msrest.serialization.Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceNameInfo, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.name = kwargs.get('name', None) @@ -20823,44 +19430,25 @@ class ServiceNewHealthReportEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param instance_id: Required. 
Id of Service instance. :type instance_id: long @@ -20876,16 +19464,17 @@ class ServiceNewHealthReportEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -20899,11 +19488,11 @@ class ServiceNewHealthReportEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -20916,44 +19505,44 @@ class ServiceNewHealthReportEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'ServiceNewHealthReport' # type: str - self.instance_id = kwargs['instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - 
self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] - - -class ServicePartitionInfo(msrest.serialization.Model): + self.instance_id = kwargs.get('instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ServiceNewHealthReport' + + +class ServicePartitionInfo(Model): """Information about a partition of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServicePartitionInfo, StatelessServicePartitionInfo. + sub-classes are: StatefulServicePartitionInfo, + StatelessServicePartitionInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service partition. 
Possible values - include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". - :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, partitioning scheme and - keys supported by it. - :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -20961,40 +19550,39 @@ class ServicePartitionInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServicePartitionInfo', 'Stateless': 'StatelessServicePartitionInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePartitionInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.health_state = kwargs.get('health_state', None) self.partition_status = kwargs.get('partition_status', None) self.partition_information = kwargs.get('partition_information', None) + self.service_kind = None -class ServicePlacementPolicyDescription(msrest.serialization.Model): +class 
ServicePlacementPolicyDescription(Model): """Describes the policy to be used for placement of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, ServicePlacementInvalidDomainPolicyDescription, ServicePlacementNonPartiallyPlaceServicePolicyDescription, ServicePlacementPreferPrimaryDomainPolicyDescription, ServicePlacementRequiredDomainPolicyDescription, ServicePlacementRequireDomainDistributionPolicyDescription. + sub-classes are: ServicePlacementInvalidDomainPolicyDescription, + ServicePlacementNonPartiallyPlaceServicePolicyDescription, + ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, + ServicePlacementPreferPrimaryDomainPolicyDescription, + ServicePlacementRequiredDomainPolicyDescription, + ServicePlacementRequireDomainDistributionPolicyDescription All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param type: Required. Constant filled by server. 
+ :type type: str """ _validation = { @@ -21006,29 +19594,26 @@ class ServicePlacementPolicyDescription(msrest.serialization.Model): } _subtype_map = { - 'type': {'AllowMultipleStatelessInstancesOnNode': 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} + 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'AllowMultipleStatelessInstancesOnNode': 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementPolicyDescription, self).__init__(**kwargs) - self.type = None # type: Optional[str] + self.type = None class ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service allowing multiple stateless instances of a partition of the service to be placed on a node. + """Describes the policy to be used for placement of a Service Fabric service + allowing multiple stateless instances of a partition of the service to be + placed on a node. All required parameters must be populated in order to send to Azure. - :param type: Required. 
The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: Holdover from other policy descriptions, not used for this policy, values - are ignored by runtime. Keeping it for any backwards-compatibility with clients. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: Holdover from other policy descriptions, not used for + this policy, values are ignored by runtime. Keeping it for any + backwards-compatibility with clients. :type domain_name: str """ @@ -21041,26 +19626,23 @@ class ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription(Ser 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, self).__init__(**kwargs) - self.type = 'AllowMultipleStatelessInstancesOnNode' # type: str self.domain_name = kwargs.get('domain_name', None) + self.type = 'AllowMultipleStatelessInstancesOnNode' class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where a particular fault or upgrade domain should not be used for placement of the instances or replicas of that service. + """Describes the policy to be used for placement of a Service Fabric service + where a particular fault or upgrade domain should not be used for placement + of the instances or replicas of that service. All required parameters must be populated in order to send to Azure. - :param type: Required. 
The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should not be used for placement. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should not be used for + placement. :type domain_name: str """ @@ -21073,25 +19655,21 @@ class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescr 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementInvalidDomainPolicyDescription, self).__init__(**kwargs) - self.type = 'InvalidDomain' # type: str self.domain_name = kwargs.get('domain_name', None) + self.type = 'InvalidDomain' class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where all replicas must be able to be placed in order for any replicas to be created. + """Describes the policy to be used for placement of a Service Fabric service + where all replicas must be able to be placed in order for any replicas to + be created. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". 
- :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param type: Required. Constant filled by server. + :type type: str """ _validation = { @@ -21102,27 +19680,29 @@ class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacement 'type': {'key': 'Type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__(**kwargs) - self.type = 'NonPartiallyPlaceService' # type: str + self.type = 'NonPartiallyPlaceService' class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where the service's Primary replicas should optimally be placed in a particular domain. - -This placement policy is usually used with fault domains in scenarios where the Service Fabric cluster is geographically distributed in order to indicate that a service's primary replica should be located in a particular fault domain, which in geo-distributed scenarios usually aligns with regional or datacenter boundaries. Note that since this is an optimization it is possible that the Primary replica may not end up located in this domain due to failures, capacity limits, or other constraints. + """Describes the policy to be used for placement of a Service Fabric service + where the service's Primary replicas should optimally be placed in a + particular domain. + This placement policy is usually used with fault domains in scenarios where + the Service Fabric cluster is geographically distributed in order to + indicate that a service's primary replica should be located in a particular + fault domain, which in geo-distributed scenarios usually aligns with + regional or datacenter boundaries. 
Note that since this is an optimization + it is possible that the Primary replica may not end up located in this + domain due to failures, capacity limits, or other constraints. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should used for placement as per this policy. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. :type domain_name: str """ @@ -21135,26 +19715,23 @@ class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolic 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__(**kwargs) - self.type = 'PreferPrimaryDomain' # type: str self.domain_name = kwargs.get('domain_name', None) + self.type = 'PreferPrimaryDomain' class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where the instances or replicas of that service must be placed in a particular domain. + """Describes the policy to be used for placement of a Service Fabric service + where the instances or replicas of that service must be placed in a + particular domain. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. 
Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should used for placement as per this policy. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. :type domain_name: str """ @@ -21167,28 +19744,31 @@ class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDesc 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementRequiredDomainPolicyDescription, self).__init__(**kwargs) - self.type = 'RequireDomain' # type: str self.domain_name = kwargs.get('domain_name', None) + self.type = 'RequireDomain' class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where two replicas from the same partition should never be placed in the same fault or upgrade domain. - -While this is not common it can expose the service to an increased risk of concurrent failures due to unplanned outages or other cases of subsequent/concurrent failures. As an example, consider a case where replicas are deployed across different data center, with one replica per location. In the event that one of the datacenters goes offline, normally the replica that was placed in that datacenter will be packed into one of the remaining datacenters. If this is not desirable then this policy should be set. 
+ """Describes the policy to be used for placement of a Service Fabric service + where two replicas from the same partition should never be placed in the + same fault or upgrade domain. + While this is not common it can expose the service to an increased risk of + concurrent failures due to unplanned outages or other cases of + subsequent/concurrent failures. As an example, consider a case where + replicas are deployed across different data center, with one replica per + location. In the event that one of the datacenters goes offline, normally + the replica that was placed in that datacenter will be packed into one of + the remaining datacenters. If this is not desirable then this policy should + be set. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should used for placement as per this policy. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. 
:type domain_name: str """ @@ -21201,40 +19781,40 @@ class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacemen 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__(**kwargs) - self.type = 'RequireDomainDistribution' # type: str self.domain_name = kwargs.get('domain_name', None) + self.type = 'RequireDomainDistribution' -class ServiceProperties(msrest.serialization.Model): +class ServiceProperties(Model): """Describes properties of a service resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. Defaults to 1 if not - specified. + :param replica_count: The number of replicas of the service to create. + Defaults to 1 if not specified. :type replica_count: int - :param execution_policy: The execution policy of the service. + :param execution_policy: The execution policy of the service :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies. - :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :param auto_scaling_policies: Auto scaling policies + :type auto_scaling_policies: + list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the service. + :ivar status_details: Gives additional information about the current + status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :ivar health_state: Describes the health state of an application resource. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the service is marked - unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', + this additional details from service fabric Health Manager for the user to + know why the service is marked unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. :type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -21262,10 +19842,7 @@ class ServiceProperties(msrest.serialization.Model): 'dns_name': {'key': 'dnsName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceProperties, self).__init__(**kwargs) self.description = kwargs.get('description', None) self.replica_count = kwargs.get('replica_count', None) @@ -21279,19 +19856,22 @@ def __init__( self.dns_name = kwargs.get('dns_name', None) -class ServiceReplicaProperties(msrest.serialization.Model): +class ServiceReplicaProperties(Model): """Describes the properties of a service replica. All required parameters must be populated in order to send to Azure. - :param os_type: Required. 
The operation system required by the code in service. Possible values - include: "Linux", "Windows". + :param os_type: Required. The operation system required by the code in + service. Possible values include: 'Linux', 'Windows' :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. + :param code_packages: Required. Describes the set of code packages that + forms the service. A code package describes the container and the + properties for running it. All the code packages are started together on + the same host and share the same context (network, process etc.). + :type code_packages: + list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service + needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. 
:type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -21309,13 +19889,10 @@ class ServiceReplicaProperties(msrest.serialization.Model): 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceReplicaProperties, self).__init__(**kwargs) - self.os_type = kwargs['os_type'] - self.code_packages = kwargs['code_packages'] + self.os_type = kwargs.get('os_type', None) + self.code_packages = kwargs.get('code_packages', None) self.network_refs = kwargs.get('network_refs', None) self.diagnostics = kwargs.get('diagnostics', None) @@ -21325,14 +19902,17 @@ class ServiceReplicaDescription(ServiceReplicaProperties): All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". + :param os_type: Required. The operation system required by the code in + service. Possible values include: 'Linux', 'Windows' :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. + :param code_packages: Required. Describes the set of code packages that + forms the service. A code package describes the container and the + properties for running it. All the code packages are started together on + the same host and share the same context (network, process etc.). 
+ :type code_packages: + list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service + needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -21354,54 +19934,57 @@ class ServiceReplicaDescription(ServiceReplicaProperties): 'replica_name': {'key': 'replicaName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceReplicaDescription, self).__init__(**kwargs) - self.replica_name = kwargs['replica_name'] + self.replica_name = kwargs.get('replica_name', None) -class ServiceResourceDescription(msrest.serialization.Model): +class ServiceResourceDescription(Model): """This type describes a service resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the Service resource. :type name: str - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". + :param os_type: Required. The operation system required by the code in + service. Possible values include: 'Linux', 'Windows' :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). 
- :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. + :param code_packages: Required. Describes the set of code packages that + forms the service. A code package describes the container and the + properties for running it. All the code packages are started together on + the same host and share the same context (network, process etc.). + :type code_packages: + list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service + needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. Defaults to 1 if not - specified. + :param replica_count: The number of replicas of the service to create. + Defaults to 1 if not specified. :type replica_count: int - :param execution_policy: The execution policy of the service. + :param execution_policy: The execution policy of the service :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies. - :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :param auto_scaling_policies: Auto scaling policies + :type auto_scaling_policies: + list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the service. + :ivar status_details: Gives additional information about the current + status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :ivar health_state: Describes the health state of an application resource. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the service is marked - unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', + this additional details from service fabric Health Manager for the user to + know why the service is marked unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. 
:type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -21437,14 +20020,11 @@ class ServiceResourceDescription(msrest.serialization.Model): 'dns_name': {'key': 'properties.dnsName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceResourceDescription, self).__init__(**kwargs) - self.name = kwargs['name'] - self.os_type = kwargs['os_type'] - self.code_packages = kwargs['code_packages'] + self.name = kwargs.get('name', None) + self.os_type = kwargs.get('os_type', None) + self.code_packages = kwargs.get('code_packages', None) self.network_refs = kwargs.get('network_refs', None) self.diagnostics = kwargs.get('diagnostics', None) self.description = kwargs.get('description', None) @@ -21459,131 +20039,39 @@ def __init__( self.dns_name = kwargs.get('dns_name', None) -class ServiceResourceProperties(ServiceReplicaProperties, ServiceProperties): - """This type describes properties of a service resource. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :param description: User readable description of the service. - :type description: str - :param replica_count: The number of replicas of the service to create. Defaults to 1 if not - specified. - :type replica_count: int - :param execution_policy: The execution policy of the service. - :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies. - :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". - :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the service. 
- :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". - :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the service is marked - unhealthy. - :vartype unhealthy_evaluation: str - :param identity_refs: The service identity list. - :type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] - :param dns_name: Dns name of the service. - :type dns_name: str - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". - :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. - :type network_refs: list[~azure.servicefabric.models.NetworkRef] - :param diagnostics: Reference to sinks in DiagnosticsDescription. 
- :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef - """ - - _validation = { - 'status': {'readonly': True}, - 'status_details': {'readonly': True}, - 'health_state': {'readonly': True}, - 'unhealthy_evaluation': {'readonly': True}, - 'os_type': {'required': True}, - 'code_packages': {'required': True}, - } - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'replica_count': {'key': 'replicaCount', 'type': 'int'}, - 'execution_policy': {'key': 'executionPolicy', 'type': 'ExecutionPolicy'}, - 'auto_scaling_policies': {'key': 'autoScalingPolicies', 'type': '[AutoScalingPolicy]'}, - 'status': {'key': 'status', 'type': 'str'}, - 'status_details': {'key': 'statusDetails', 'type': 'str'}, - 'health_state': {'key': 'healthState', 'type': 'str'}, - 'unhealthy_evaluation': {'key': 'unhealthyEvaluation', 'type': 'str'}, - 'identity_refs': {'key': 'identityRefs', 'type': '[ServiceIdentity]'}, - 'dns_name': {'key': 'dnsName', 'type': 'str'}, - 'os_type': {'key': 'osType', 'type': 'str'}, - 'code_packages': {'key': 'codePackages', 'type': '[ContainerCodePackageProperties]'}, - 'network_refs': {'key': 'networkRefs', 'type': '[NetworkRef]'}, - 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, - } - - def __init__( - self, - **kwargs - ): - super(ServiceResourceProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.replica_count = kwargs.get('replica_count', None) - self.execution_policy = kwargs.get('execution_policy', None) - self.auto_scaling_policies = kwargs.get('auto_scaling_policies', None) - self.status = None - self.status_details = None - self.health_state = None - self.unhealthy_evaluation = None - self.identity_refs = kwargs.get('identity_refs', None) - self.dns_name = kwargs.get('dns_name', None) - self.os_type = kwargs['os_type'] - self.code_packages = kwargs['code_packages'] - self.network_refs = kwargs.get('network_refs', None) - self.diagnostics = 
kwargs.get('diagnostics', None) - - class ServicesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for services of a certain service type belonging to an application, containing health evaluations for each unhealthy service that impacted current aggregated health state. Can be returned when evaluating application health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for services of a certain service type + belonging to an application, containing health evaluations for each + unhealthy service that impacted current aggregated health state. 
Can be + returned when evaluating application health and the aggregated health state + is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param service_type_name: Name of the service type of the services. :type service_type_name: str - :param max_percent_unhealthy_services: Maximum allowed percentage of unhealthy services from - the ServiceTypeHealthPolicy. + :param max_percent_unhealthy_services: Maximum allowed percentage of + unhealthy services from the ServiceTypeHealthPolicy. :type max_percent_unhealthy_services: int - :param total_count: Total number of services of the current service type in the application - from the health store. + :param total_count: Total number of services of the current service type + in the application from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ServiceHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ServiceHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -21591,55 +20079,57 @@ class ServicesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServicesHealthEvaluation, self).__init__(**kwargs) - self.kind = 'Services' # type: str self.service_type_name = kwargs.get('service_type_name', None) self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'Services' -class ServiceTypeDescription(msrest.serialization.Model): - """Describes a service type defined in the service manifest of a provisioned application type. The properties the ones defined in the service manifest. +class ServiceTypeDescription(Model): + """Describes a service type defined in the service manifest of a provisioned + application type. The properties the ones defined in the service manifest. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceTypeDescription, StatelessServiceTypeDescription. + sub-classes are: StatefulServiceTypeDescription, + StatelessServiceTypeDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The kind of service (Stateless or Stateful).Constant filled by server. - Possible values include: "Invalid", "Stateless", "Stateful". - :type kind: str or ~azure.servicefabric.models.ServiceKind - :param is_stateful: Indicates whether the service type is a stateful service type or a - stateless service type. This property is true if the service type is a stateful service type, - false otherwise. + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when instantiating this - service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy descriptions. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -21647,34 +20137,31 @@ class ServiceTypeDescription(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'Stateful': 'StatefulServiceTypeDescription', 'Stateless': 'StatelessServiceTypeDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceTypeDescription, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.is_stateful = kwargs.get('is_stateful', None) self.service_type_name = kwargs.get('service_type_name', None) self.placement_constraints = kwargs.get('placement_constraints', None) self.load_metrics = kwargs.get('load_metrics', None) self.service_placement_policies = kwargs.get('service_placement_policies', None) self.extensions = kwargs.get('extensions', None) + self.kind = None -class ServiceTypeExtensionDescription(msrest.serialization.Model): +class ServiceTypeExtensionDescription(Model): """Describes extension of a service type defined in the service manifest. :param key: The name of the extension. 
@@ -21688,53 +20175,51 @@ class ServiceTypeExtensionDescription(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceTypeExtensionDescription, self).__init__(**kwargs) self.key = kwargs.get('key', None) self.value = kwargs.get('value', None) -class ServiceTypeHealthPolicy(msrest.serialization.Model): - """Represents the health policy used to evaluate the health of services belonging to a service type. - - :param max_percent_unhealthy_partitions_per_service: The maximum allowed percentage of - unhealthy partitions per service. Allowed values are Byte values from zero to 100 - - The percentage represents the maximum tolerated percentage of partitions that can be unhealthy - before the service is considered in error. - If the percentage is respected but there is at least one unhealthy partition, the health is - evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy partitions over the total - number of partitions in the service. - The computation rounds up to tolerate one failure on small numbers of partitions. Default - percentage is zero. +class ServiceTypeHealthPolicy(Model): + """Represents the health policy used to evaluate the health of services + belonging to a service type. + + :param max_percent_unhealthy_partitions_per_service: The maximum allowed + percentage of unhealthy partitions per service. Allowed values are Byte + values from zero to 100 + The percentage represents the maximum tolerated percentage of partitions + that can be unhealthy before the service is considered in error. + If the percentage is respected but there is at least one unhealthy + partition, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy + partitions over the total number of partitions in the service. + The computation rounds up to tolerate one failure on small numbers of + partitions. 
Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_partitions_per_service: int - :param max_percent_unhealthy_replicas_per_partition: The maximum allowed percentage of - unhealthy replicas per partition. Allowed values are Byte values from zero to 100. - - The percentage represents the maximum tolerated percentage of replicas that can be unhealthy - before the partition is considered in error. - If the percentage is respected but there is at least one unhealthy replica, the health is - evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy replicas over the total - number of replicas in the partition. - The computation rounds up to tolerate one failure on small numbers of replicas. Default - percentage is zero. + :param max_percent_unhealthy_replicas_per_partition: The maximum allowed + percentage of unhealthy replicas per partition. Allowed values are Byte + values from zero to 100. + The percentage represents the maximum tolerated percentage of replicas + that can be unhealthy before the partition is considered in error. + If the percentage is respected but there is at least one unhealthy + replica, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy replicas + over the total number of replicas in the partition. + The computation rounds up to tolerate one failure on small numbers of + replicas. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_replicas_per_partition: int - :param max_percent_unhealthy_services: The maximum allowed percentage of unhealthy services. - Allowed values are Byte values from zero to 100. - - The percentage represents the maximum tolerated percentage of services that can be unhealthy - before the application is considered in error. - If the percentage is respected but there is at least one unhealthy service, the health is - evaluated as Warning. 
- This is calculated by dividing the number of unhealthy services of the specific service type - over the total number of services of the specific service type. - The computation rounds up to tolerate one failure on small numbers of services. Default - percentage is zero. + :param max_percent_unhealthy_services: The maximum allowed percentage of + unhealthy services. Allowed values are Byte values from zero to 100. + The percentage represents the maximum tolerated percentage of services + that can be unhealthy before the application is considered in error. + If the percentage is respected but there is at least one unhealthy + service, the health is evaluated as Warning. + This is calculated by dividing the number of unhealthy services of the + specific service type over the total number of services of the specific + service type. + The computation rounds up to tolerate one failure on small numbers of + services. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_services: int """ @@ -21744,26 +20229,23 @@ class ServiceTypeHealthPolicy(msrest.serialization.Model): 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceTypeHealthPolicy, self).__init__(**kwargs) self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', 0) self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', 0) self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0) -class ServiceTypeHealthPolicyMapItem(msrest.serialization.Model): +class ServiceTypeHealthPolicyMapItem(Model): """Defines an item in ServiceTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the service type health policy map item. This is the name of - the service type. 
+ :param key: Required. The key of the service type health policy map item. + This is the name of the service type. :type key: str - :param value: Required. The value of the service type health policy map item. This is the - ServiceTypeHealthPolicy for this service type. + :param value: Required. The value of the service type health policy map + item. This is the ServiceTypeHealthPolicy for this service type. :type value: ~azure.servicefabric.models.ServiceTypeHealthPolicy """ @@ -21777,29 +20259,29 @@ class ServiceTypeHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'ServiceTypeHealthPolicy'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceTypeHealthPolicyMapItem, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) -class ServiceTypeInfo(msrest.serialization.Model): - """Information about a service type that is defined in a service manifest of a provisioned application type. +class ServiceTypeInfo(Model): + """Information about a service type that is defined in a service manifest of a + provisioned application type. - :param service_type_description: Describes a service type defined in the service manifest of a - provisioned application type. The properties the ones defined in the service manifest. - :type service_type_description: ~azure.servicefabric.models.ServiceTypeDescription - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_type_description: Describes a service type defined in the + service manifest of a provisioned application type. The properties the + ones defined in the service manifest. + :type service_type_description: + ~azure.servicefabric.models.ServiceTypeDescription + :param service_manifest_name: The name of the service manifest in which + this service type is defined. 
:type service_manifest_name: str - :param service_manifest_version: The version of the service manifest in which this service type - is defined. + :param service_manifest_version: The version of the service manifest in + which this service type is defined. :type service_manifest_version: str - :param is_service_group: Indicates whether the service is a service group. If it is, the - property value is true otherwise false. + :param is_service_group: Indicates whether the service is a service group. + If it is, the property value is true otherwise false. :type is_service_group: bool """ @@ -21810,10 +20292,7 @@ class ServiceTypeInfo(msrest.serialization.Model): 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceTypeInfo, self).__init__(**kwargs) self.service_type_description = kwargs.get('service_type_description', None) self.service_manifest_name = kwargs.get('service_manifest_name', None) @@ -21821,8 +20300,9 @@ def __init__( self.is_service_group = kwargs.get('is_service_group', None) -class ServiceTypeManifest(msrest.serialization.Model): - """Contains the manifest describing a service type registered as part of an application in a Service Fabric cluster. +class ServiceTypeManifest(Model): + """Contains the manifest describing a service type registered as part of an + application in a Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -21832,89 +20312,101 @@ class ServiceTypeManifest(msrest.serialization.Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceTypeManifest, self).__init__(**kwargs) self.manifest = kwargs.get('manifest', None) -class ServiceUpdateDescription(msrest.serialization.Model): - """A ServiceUpdateDescription contains all of the information necessary to update a service. 
+class ServiceUpdateDescription(Model): + """A ServiceUpdateDescription contains all of the information necessary to + update a service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceUpdateDescription, StatelessServiceUpdateDescription. - - All required parameters must be populated in order to send to Azure. - - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and - QuorumLossWaitDuration (4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. - * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property - (for Stateful services) or the InstanceCount property (for Stateless services) is set. The - value is 1. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 2. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 4. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 8. - * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. - * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. - * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is - 64. - * Correlation - Indicates the CorrelationScheme property is set. The value is 128. 
- * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. - * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. - * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 2048. - * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. - * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is - 8192. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 16384. - * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 32768. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 65536. - * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. - * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. - * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. + sub-classes are: StatefulServiceUpdateDescription, + StatelessServiceUpdateDescription + + All required parameters must be populated in order to send to Azure. + + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. 
+ - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 2048. + - MinInstanceCount - Indicates the MinInstanceCount property is set. The + value is 4096. + - MinInstancePercentage - Indicates the MinInstancePercentage property is + set. The value is 8192. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 16384. + - InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 32768. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 65536. + - ServiceDnsName - Indicates the ServiceDnsName property is set. The value + is 131072. + - TagsForPlacement - Indicates the TagsForPlacement property is set. The + value is 1048576. 
+ - TagsForRunning - Indicates the TagsForRunning property is set. The value + is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. 
- :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param service_dns_name: The DNS name of the service. :type service_dns_name: str :param tags_for_placement: Tags for placement of this service. :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription :param tags_for_running: Tags for running of this service. :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -21922,7 +20414,6 @@ class ServiceUpdateDescription(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -21933,18 +20424,15 @@ class ServiceUpdateDescription(msrest.serialization.Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceUpdateDescription', 'Stateless': 'StatelessServiceUpdateDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceUpdateDescription, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.flags = kwargs.get('flags', None) self.placement_constraints = kwargs.get('placement_constraints', None) self.correlation_scheme = kwargs.get('correlation_scheme', None) @@ -21955,18 +20443,20 @@ def __init__( self.service_dns_name = kwargs.get('service_dns_name', None) self.tags_for_placement = 
kwargs.get('tags_for_placement', None) self.tags_for_running = kwargs.get('tags_for_running', None) + self.service_kind = None -class ServiceUpgradeProgress(msrest.serialization.Model): - """Information about how many replicas are completed or pending for a specific service during upgrade. +class ServiceUpgradeProgress(Model): + """Information about how many replicas are completed or pending for a specific + service during upgrade. :param service_name: Name of the Service resource. :type service_name: str - :param completed_replica_count: The number of replicas that completes the upgrade in the - service. + :param completed_replica_count: The number of replicas that completes the + upgrade in the service. :type completed_replica_count: str - :param pending_replica_count: The number of replicas that are waiting to be upgraded in the - service. + :param pending_replica_count: The number of replicas that are waiting to + be upgraded in the service. :type pending_replica_count: str """ @@ -21976,25 +20466,26 @@ class ServiceUpgradeProgress(msrest.serialization.Model): 'pending_replica_count': {'key': 'PendingReplicaCount', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ServiceUpgradeProgress, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.completed_replica_count = kwargs.get('completed_replica_count', None) self.pending_replica_count = kwargs.get('pending_replica_count', None) -class Setting(msrest.serialization.Model): - """Describes a setting for the container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". The path for Linux container is "/var/secrets". +class Setting(Model): + """Describes a setting for the container. The setting file path can be fetched + from environment variable "Fabric_SettingPath". The path for Windows + container is "C:\\secrets". 
The path for Linux container is "/var/secrets". - :param type: The type of the setting being given in value. Possible values include: - "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". + :param type: The type of the setting being given in value. Possible values + include: 'ClearText', 'KeyVaultReference', 'SecretValueReference'. Default + value: "ClearText" . :type type: str or ~azure.servicefabric.models.SettingType :param name: The name of the setting. :type name: str - :param value: The value of the setting, will be processed based on the type provided. + :param value: The value of the setting, will be processed based on the + type provided. :type value: str """ @@ -22004,10 +20495,7 @@ class Setting(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(Setting, self).__init__(**kwargs) self.type = kwargs.get('type', "ClearText") self.name = kwargs.get('name', None) @@ -22015,19 +20503,20 @@ def __init__( class SingletonPartitionInformation(PartitionInformation): - """Information about a partition that is singleton. The services with singleton partitioning scheme are effectively non-partitioned. They only have one partition. + """Information about a partition that is singleton. The services with + singleton partitioning scheme are effectively non-partitioned. They only + have one partition. All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. 
The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str """ _validation = { @@ -22035,26 +20524,23 @@ class SingletonPartitionInformation(PartitionInformation): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SingletonPartitionInformation, self).__init__(**kwargs) - self.service_partition_kind = 'Singleton' # type: str + self.service_partition_kind = 'Singleton' class SingletonPartitionSchemeDescription(PartitionSchemeDescription): - """Describes the partition scheme of a singleton-partitioned, or non-partitioned service. + """Describes the partition scheme of a singleton-partitioned, or + non-partitioned service. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. 
+ :type partition_scheme: str """ _validation = { @@ -22065,66 +20551,76 @@ class SingletonPartitionSchemeDescription(PartitionSchemeDescription): 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SingletonPartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'Singleton' # type: str + self.partition_scheme = 'Singleton' -class StartClusterUpgradeDescription(msrest.serialization.Model): +class StartClusterUpgradeDescription(Model): """Describes the parameters for starting a cluster upgrade. :param code_version: The cluster code version. :type code_version: str :param config_version: The cluster configuration version. :type config_version: str - :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. 
When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible - values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", - "ReverseLexicographical". Default value: "Default". + :param sort_order: Defines the order in which an upgrade proceeds through + the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', + 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default + value: "Default" . :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. 
- :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than - absolute health evaluation after completion of each upgrade domain. + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of - the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health policy map used to - evaluate the health of an application or one of its children entities. - :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies - :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a - stateless instance is closed, to allow the active requests to drain gracefully. 
This would be - effective when the instance is closing during the application/cluster - upgrade, only for those instances which have a non-zero delay duration configured in the - service description. See InstanceCloseDelayDurationSeconds property in $ref: + :param application_health_policy_map: Defines the application health + policy map used to evaluate the health of an application or one of its + children entities. + :type application_health_policy_map: + ~azure.servicefabric.models.ApplicationHealthPolicies + :param instance_close_delay_duration_in_seconds: Duration in seconds, to + wait before a stateless instance is closed, to allow the active requests + to drain gracefully. This would be effective when the instance is closing + during the application/cluster + upgrade, only for those instances which have a non-zero delay duration + configured in the service description. See + InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates - that the behavior will entirely depend on the delay configured in the stateless service - description. + Note, the default value of InstanceCloseDelayDurationInSeconds is + 4294967295, which indicates that the behavior will entirely depend on the + delay configured in the stateless service description. 
:type instance_close_delay_duration_in_seconds: long """ @@ -22144,24 +20640,21 @@ class StartClusterUpgradeDescription(msrest.serialization.Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StartClusterUpgradeDescription, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) self.config_version = kwargs.get('config_version', None) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") - self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', 42949672925) - self.force_restart = kwargs.get('force_restart', False) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) self.sort_order = kwargs.get('sort_order', "Default") self.monitoring_policy = kwargs.get('monitoring_policy', None) self.cluster_health_policy = kwargs.get('cluster_health_policy', None) self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) self.cluster_upgrade_health_policy = kwargs.get('cluster_upgrade_health_policy', None) self.application_health_policy_map = kwargs.get('application_health_policy_map', None) - self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', 4294967295) + self.instance_close_delay_duration_in_seconds = kwargs.get('instance_close_delay_duration_in_seconds', None) class StartedChaosEvent(ChaosEvent): @@ -22169,34 +20662,31 @@ class StartedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. 
Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param chaos_parameters: Defines all the parameters to configure a Chaos run. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param chaos_parameters: Defines all the parameters to configure a Chaos + run. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StartedChaosEvent, self).__init__(**kwargs) - self.kind = 'Started' # type: str self.chaos_parameters = kwargs.get('chaos_parameters', None) + self.kind = 'Started' class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -22204,48 +20694,31 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -22261,16 +20734,17 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -22285,11 +20759,11 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -22303,21 +20777,18 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): 
super(StatefulReplicaHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'StatefulReplicaHealthReportExpired' # type: str - self.replica_instance_id = kwargs['replica_instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.replica_instance_id = kwargs.get('replica_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'StatefulReplicaHealthReportExpired' class StatefulReplicaNewHealthReportEvent(ReplicaEvent): @@ -22325,48 +20796,31 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -22382,16 +20836,17 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -22406,11 +20861,11 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -22424,21 +20879,18 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): 
super(StatefulReplicaNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'StatefulReplicaNewHealthReport' # type: str - self.replica_instance_id = kwargs['replica_instance_id'] - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.replica_instance_id = kwargs.get('replica_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'StatefulReplicaNewHealthReport' class StatefulServiceDescription(ServiceDescription): @@ -22446,105 +20898,121 @@ class StatefulServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. 
:type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. Initialization data - is passed to service instances or replicas when they are created. + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an object. - :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. 
- :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param tags_required_to_place: Tags for placement of this service. 
- :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_place: + ~azure.servicefabric.models.NodeTagsDescription :param tags_required_to_run: Tags for running of this service. - :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription - :param target_replica_set_size: Required. The target replica set size as a number. + :type tags_required_to_run: + ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param target_replica_set_size: Required. The target replica set size as a + number. :type target_replica_set_size: int - :param min_replica_set_size: Required. The minimum replica set size as a number. + :param min_replica_set_size: Required. The minimum replica set size as a + number. :type min_replica_set_size: int - :param has_persisted_state: Required. A flag indicating whether this is a persistent service - which stores states on the local disk. If it is then the value of this property is true, if not - it is false. + :param has_persisted_state: Required. A flag indicating whether this is a + persistent service which stores states on the local disk. If it is then + the value of this property is true, if not it is false. :type has_persisted_state: bool - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for QuorumLossWaitDuration (2) and - StandByReplicaKeepDuration(4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 1. 
- * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 2. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 4. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 8. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 16. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + QuorumLossWaitDuration (2) and StandByReplicaKeepDuration(4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 1. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 2. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 4. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 8. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 16. :type flags: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica - goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, + between when a replica goes down and when a new replica is created. :type replica_restart_wait_duration_seconds: long - :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a - partition is allowed to be in a state of quorum loss. 
+ :param quorum_loss_wait_duration_seconds: The maximum duration, in + seconds, for which a partition is allowed to be in a state of quorum loss. :type quorum_loss_wait_duration_seconds: long - :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas - should be maintained before being removed. + :param stand_by_replica_keep_duration_seconds: The definition on how long + StandBy replicas should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: long - :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild - before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which + replicas can stay InBuild before reporting that build is stuck. :type service_placement_time_limit_seconds: long - :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if - the target replica has not finished build. If desired behavior is to drop it as soon as - possible the value of this property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source + Secondary replica even if the target replica has not finished build. If + desired behavior is to drop it as soon as possible the value of this + property is true, if not it is false. :type drop_source_replica_on_move: bool - :param replica_lifecycle_description: Defines how replicas of this service will behave during - their lifecycle. - :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription + :param replica_lifecycle_description: Defines how replicas of this service + will behave during their lifecycle. 
+ :type replica_lifecycle_description: + ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { - 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, + 'service_kind': {'required': True}, 'target_replica_set_size': {'required': True, 'minimum': 1}, 'min_replica_set_size': {'required': True, 'minimum': 1}, 'has_persisted_state': {'required': True}, @@ -22555,7 +21023,6 @@ class StatefulServiceDescription(ServiceDescription): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -22572,6 +21039,7 @@ class StatefulServiceDescription(ServiceDescription): 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, @@ -22584,15 +21052,11 @@ class StatefulServiceDescription(ServiceDescription): 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceDescription, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str - self.target_replica_set_size = kwargs['target_replica_set_size'] - self.min_replica_set_size = kwargs['min_replica_set_size'] - self.has_persisted_state = kwargs['has_persisted_state'] + 
self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.has_persisted_state = kwargs.get('has_persisted_state', None) self.flags = kwargs.get('flags', None) self.replica_restart_wait_duration_seconds = kwargs.get('replica_restart_wait_duration_seconds', None) self.quorum_loss_wait_duration_seconds = kwargs.get('quorum_loss_wait_duration_seconds', None) @@ -22600,6 +21064,7 @@ def __init__( self.service_placement_time_limit_seconds = kwargs.get('service_placement_time_limit_seconds', None) self.drop_source_replica_on_move = kwargs.get('drop_source_replica_on_move', None) self.replica_lifecycle_description = kwargs.get('replica_lifecycle_description', None) + self.service_kind = 'Stateful' class StatefulServiceInfo(ServiceInfo): @@ -22607,31 +21072,33 @@ class StatefulServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". 
- :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service manifest. + :param type_name: Name of the service type as specified in the service + manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values include: "Unknown", - "Active", "Upgrading", "Deleting", "Creating", "Failed". + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param has_persisted_state: Whether the service has persisted state. 
:type has_persisted_state: bool """ @@ -22642,23 +21109,20 @@ class StatefulServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceInfo, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.has_persisted_state = kwargs.get('has_persisted_state', None) + self.service_kind = 'Stateful' class StatefulServicePartitionInfo(ServicePartitionInfo): @@ -22666,31 +21130,35 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service partition. 
Possible values - include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". - :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, partitioning scheme and - keys supported by it. - :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: long :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: long - :param last_quorum_loss_duration: The duration for which this partition was in quorum loss. If - the partition is currently in quorum loss, it returns the duration since it has been in that - state. This field is using ISO8601 format for specifying the duration. - :type last_quorum_loss_duration: ~datetime.timedelta - :param primary_epoch: An Epoch is a configuration number for the partition as a whole. When the - configuration of the replica set changes, for example when the Primary replica changes, the - operations that are replicated from the new Primary replica are said to be a new Epoch from the - ones which were sent by the old Primary replica. + :param last_quorum_loss_duration: The duration for which this partition + was in quorum loss. 
If the partition is currently in quorum loss, it + returns the duration since it has been in that state. This field is using + ISO8601 format for specifying the duration. + :type last_quorum_loss_duration: timedelta + :param primary_epoch: An Epoch is a configuration number for the partition + as a whole. When the configuration of the replica set changes, for example + when the Primary replica changes, the operations that are replicated from + the new Primary replica are said to be a new Epoch from the ones which + were sent by the old Primary replica. :type primary_epoch: ~azure.servicefabric.models.Epoch """ @@ -22699,59 +21167,60 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'long'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'long'}, 'last_quorum_loss_duration': {'key': 'LastQuorumLossDuration', 'type': 'duration'}, 'primary_epoch': {'key': 'PrimaryEpoch', 'type': 'Epoch'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServicePartitionInfo, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.target_replica_set_size = kwargs.get('target_replica_set_size', None) self.min_replica_set_size = kwargs.get('min_replica_set_size', None) self.last_quorum_loss_duration = kwargs.get('last_quorum_loss_duration', None) self.primary_epoch = kwargs.get('primary_epoch', None) + self.service_kind = 'Stateful' class StatefulServiceReplicaHealth(ReplicaHealth): """Represents the health of the stateful service replica. 
-Contains the replica aggregated health state, the health events and the unhealthy evaluations. + Contains the replica aggregated health state, the health events and the + unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. 
:type replica_id: str """ @@ -22764,39 +21233,41 @@ class StatefulServiceReplicaHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceReplicaHealth, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) + self.service_kind = 'Stateful' class StatefulServiceReplicaHealthState(ReplicaHealthState): - """Represents the health state of the stateful service replica, which contains the replica ID and the aggregated health state. + """Represents the health state of the stateful service replica, which contains + the replica ID and the aggregated health state. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param partition_id: The ID of the partition to which this replica belongs. + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. :type partition_id: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. 
:type replica_id: str """ @@ -22806,49 +21277,52 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceReplicaHealthState, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = kwargs.get('replica_id', None) + self.service_kind = 'Stateful' class StatefulServiceReplicaInfo(ReplicaInfo): - """Represents a stateful service replica. This includes information about the identity, role, status, health, node name, uptime, and other details about the replica. + """Represents a stateful service replica. This includes information about the + identity, role, status, health, node name, uptime, and other details about + the replica. All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
+ :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. :type last_in_build_duration_in_seconds: str - :param replica_role: The role of a replica of a stateful service. Possible values include: - "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_role: The role of a replica of a stateful service. Possible + values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', + 'ActiveSecondary' :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str """ @@ -22857,53 +21331,55 @@ class StatefulServiceReplicaInfo(ReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceReplicaInfo, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.replica_role = kwargs.get('replica_role', None) self.replica_id = kwargs.get('replica_id', None) + self.service_kind = 'Stateful' class StatefulServiceTypeDescription(ServiceTypeDescription): - """Describes a stateful service type defined in the service manifest of a provisioned application type. + """Describes a stateful service type defined in the service manifest of a + provisioned application type. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. - Possible values include: "Invalid", "Stateless", "Stateful". - :type kind: str or ~azure.servicefabric.models.ServiceKind - :param is_stateful: Indicates whether the service type is a stateful service type or a - stateless service type. This property is true if the service type is a stateful service type, - false otherwise. + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. 
This property is true if the + service type is a stateful service type, false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when instantiating this - service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy descriptions. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param has_persisted_state: A flag indicating whether this is a persistent service which stores - states on the local disk. If it is then the value of this property is true, if not it is false. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. + :type kind: str + :param has_persisted_state: A flag indicating whether this is a persistent + service which stores states on the local disk. If it is then the value of + this property is true, if not it is false. 
:type has_persisted_state: bool """ @@ -22912,23 +21388,20 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceTypeDescription, self).__init__(**kwargs) - self.kind = 'Stateful' # type: str self.has_persisted_state = kwargs.get('has_persisted_state', None) + self.kind = 'Stateful' class StatefulServiceUpdateDescription(ServiceUpdateDescription): @@ -22936,96 +21409,111 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and - QuorumLossWaitDuration (4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. 
- * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property - (for Stateful services) or the InstanceCount property (for Stateless services) is set. The - value is 1. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 2. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 4. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 8. - * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. - * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. - * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is - 64. - * Correlation - Indicates the CorrelationScheme property is set. The value is 128. - * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. - * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. - * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 2048. - * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. - * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is - 8192. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 16384. - * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 32768. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 65536. - * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. - * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. 
- * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 2048. + - MinInstanceCount - Indicates the MinInstanceCount property is set. The + value is 4096. 
+ - MinInstancePercentage - Indicates the MinInstancePercentage property is + set. The value is 8192. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 16384. + - InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 32768. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 65536. + - ServiceDnsName - Indicates the ServiceDnsName property is set. The value + is 131072. + - TagsForPlacement - Indicates the TagsForPlacement property is set. The + value is 1048576. + - TagsForRunning - Indicates the TagsForRunning property is set. The value + is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. 
:type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param service_dns_name: The DNS name of the service. :type service_dns_name: str :param tags_for_placement: Tags for placement of this service. :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription :param tags_for_running: Tags for running of this service. :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: int :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica - goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, + between when a replica goes down and when a new replica is created. :type replica_restart_wait_duration_seconds: str - :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a - partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in + seconds, for which a partition is allowed to be in a state of quorum loss. 
:type quorum_loss_wait_duration_seconds: str - :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas - should be maintained before being removed. + :param stand_by_replica_keep_duration_seconds: The definition on how long + StandBy replicas should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: str - :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild - before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which + replicas can stay InBuild before reporting that build is stuck. :type service_placement_time_limit_seconds: str - :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if - the target replica has not finished build. If desired behavior is to drop it as soon as - possible the value of this property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source + Secondary replica even if the target replica has not finished build. If + desired behavior is to drop it as soon as possible the value of this + property is true, if not it is false. :type drop_source_replica_on_move: bool - :param replica_lifecycle_description: Defines how replicas of this service will behave during - their lifecycle. - :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription + :param replica_lifecycle_description: Defines how replicas of this service + will behave during their lifecycle. 
+ :type replica_lifecycle_description: + ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { @@ -23035,7 +21523,6 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -23046,6 +21533,7 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'str'}, @@ -23056,12 +21544,8 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatefulServiceUpdateDescription, self).__init__(**kwargs) - self.service_kind = 'Stateful' # type: str self.target_replica_set_size = kwargs.get('target_replica_set_size', None) self.min_replica_set_size = kwargs.get('min_replica_set_size', None) self.replica_restart_wait_duration_seconds = kwargs.get('replica_restart_wait_duration_seconds', None) @@ -23070,6 +21554,7 @@ def __init__( self.service_placement_time_limit_seconds = kwargs.get('service_placement_time_limit_seconds', None) self.drop_source_replica_on_move = kwargs.get('drop_source_replica_on_move', None) 
self.replica_lifecycle_description = kwargs.get('replica_lifecycle_description', None) + self.service_kind = 'Stateful' class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -23077,48 +21562,31 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - 
"ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. 
If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -23132,16 +21600,17 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -23155,11 +21624,11 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -23172,20 +21641,17 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessReplicaHealthReportExpiredEvent, self).__init__(**kwargs) - self.kind = 'StatelessReplicaHealthReportExpired' # type: str - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = 
kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'StatelessReplicaHealthReportExpired' class StatelessReplicaNewHealthReportEvent(ReplicaEvent): @@ -23193,48 +21659,31 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - 
"ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. 
If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -23248,16 +21697,17 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -23271,11 +21721,11 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -23288,20 +21738,17 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessReplicaNewHealthReportEvent, self).__init__(**kwargs) - self.kind = 'StatelessReplicaNewHealthReport' # type: str - self.source_id = kwargs['source_id'] - self.property = kwargs['property'] - self.health_state = kwargs['health_state'] - self.time_to_live_ms = kwargs['time_to_live_ms'] - self.sequence_number = kwargs['sequence_number'] - self.description = kwargs['description'] - self.remove_when_expired = kwargs['remove_when_expired'] - self.source_utc_timestamp = kwargs['source_utc_timestamp'] + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', 
None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'StatelessReplicaNewHealthReport' class StatelessServiceDescription(ServiceDescription): @@ -23309,128 +21756,143 @@ class StatelessServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. Initialization data - is passed to service instances or replicas when they are created. + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an object. - :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. 
Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. 
:type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param tags_required_to_place: Tags for placement of this service. - :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_place: + ~azure.servicefabric.models.NodeTagsDescription :param tags_required_to_run: Tags for running of this service. - :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_run: + ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param instance_count: Required. The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up - to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). 
- Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted - into the number of nodes on which the instances are allowed to be placed according to the - placement constraints on the service. + :param min_instance_count: MinInstanceCount is the minimum number of + instances that must be up to meet the EnsureAvailability safety check + during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation + -1 is first converted into the number of nodes on which the instances are + allowed to be placed according to the placement constraints on the + service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum percentage of - InstanceCount that must be up to meet the EnsureAvailability safety check during operations - like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first - converted into the number of nodes on which the instances are allowed to be placed according to - the placement constraints on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum + percentage of InstanceCount that must be up to meet the EnsureAvailability + safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage + computation, -1 is first converted into the number of nodes on which the + instances are allowed to be placed according to the placement constraints + on the service. 
:type min_instance_percentage: int - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 1 then the flags for InstanceCloseDelayDuration is set. - - - * None - Does not indicate any other properties are set. The value is zero. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 1. - * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDurationSeconds property is - set. The value is 2. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 1 then the flags for + InstanceCloseDelayDuration is set. + - None - Does not indicate any other properties are set. The value is + zero. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 1. + - InstanceRestartWaitDuration - Indicates the + InstanceRestartWaitDurationSeconds property is set. The value is 2. :type flags: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless - instance is closed, to allow the active requests to drain gracefully. This would be effective - when the instance is closing during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the delay, which prevents - new connections to this instance. 
+ :param instance_close_delay_duration_seconds: Duration in seconds, to wait + before a stateless instance is closed, to allow the active requests to + drain gracefully. This would be effective when the instance is closing + during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the + delay, which prevents new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - .. code-block:: - - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future requests. - - Note, the default value of InstanceCloseDelayDuration is 0, which indicates that there won't - be any delay or removal of the endpoint prior to closing the instance. + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future + requests. + Note, the default value of InstanceCloseDelayDuration is 0, which + indicates that there won't be any delay or removal of the endpoint prior + to closing the instance. :type instance_close_delay_duration_seconds: long - :param instance_lifecycle_description: Defines how instances of this service will behave during - their lifecycle. - :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription - :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer - starts. When it expires Service Fabric will create a new instance on any node in the cluster. 
- This configuration is to reduce unnecessary creation of a new instance in situations where the - instance going down is likely to recover in a short time. For example, during an upgrade. - The default value is 0, which indicates that when stateless instance goes down, Service Fabric - will immediately start building its replacement. + :param instance_lifecycle_description: Defines how instances of this + service will behave during their lifecycle. + :type instance_lifecycle_description: + ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance + goes down, this timer starts. When it expires Service Fabric will create a + new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in + situations where the instance going down is likely to recover in a short + time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes + down, Service Fabric will immediately start building its replacement. 
:type instance_restart_wait_duration_seconds: long """ _validation = { - 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, + 'service_kind': {'required': True}, 'instance_count': {'required': True, 'minimum': -1}, - 'min_instance_count': {'minimum': 1}, - 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, 'instance_close_delay_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, 'instance_restart_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -23447,6 +21909,7 @@ class StatelessServiceDescription(ServiceDescription): 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, @@ -23456,19 +21919,16 @@ class StatelessServiceDescription(ServiceDescription): 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'long'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceDescription, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str - self.instance_count = kwargs['instance_count'] - self.min_instance_count = kwargs.get('min_instance_count', 1) - self.min_instance_percentage = kwargs.get('min_instance_percentage', 0) + 
self.instance_count = kwargs.get('instance_count', None) + self.min_instance_count = kwargs.get('min_instance_count', None) + self.min_instance_percentage = kwargs.get('min_instance_percentage', None) self.flags = kwargs.get('flags', None) self.instance_close_delay_duration_seconds = kwargs.get('instance_close_delay_duration_seconds', None) self.instance_lifecycle_description = kwargs.get('instance_lifecycle_description', None) self.instance_restart_wait_duration_seconds = kwargs.get('instance_restart_wait_duration_seconds', None) + self.service_kind = 'Stateless' class StatelessServiceInfo(ServiceInfo): @@ -23476,31 +21936,33 @@ class StatelessServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str - :param type_name: Name of the service type as specified in the service manifest. + :param type_name: Name of the service type as specified in the service + manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values include: "Unknown", - "Active", "Upgrading", "Deleting", "Creating", "Failed". + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -23509,53 +21971,53 @@ class StatelessServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceInfo, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str + self.service_kind = 'Stateless' class StatelessServiceInstanceHealth(ReplicaHealth): """Represents the health of the stateless service instance. -Contains the instance aggregated health state, the health events and the unhealthy evaluations. + Contains the instance aggregated health state, the health events and the + unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. 
It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. :type instance_id: str """ @@ -23568,36 +22030,36 @@ class StatelessServiceInstanceHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceInstanceHealth, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) + self.service_kind = 'Stateless' class StatelessServiceInstanceHealthState(ReplicaHealthState): - """Represents the health state of the stateless service instance, which contains the instance ID and the aggregated health state. + """Represents the health state of the stateless service instance, which + contains the instance ID and the aggregated health state. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". 
- :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param partition_id: The ID of the partition to which this replica belongs. + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. :type partition_id: str - :param replica_id: Id of the stateless service instance on the wire this field is called - ReplicaId. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of the stateless service instance on the wire this + field is called ReplicaId. :type replica_id: str """ @@ -23607,45 +22069,46 @@ class StatelessServiceInstanceHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceInstanceHealthState, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.replica_id = kwargs.get('replica_id', None) + self.service_kind = 'Stateless' class StatelessServiceInstanceInfo(ReplicaInfo): - """Represents a stateless service instance. This includes information about the identity, status, health, node name, uptime, and other details about the instance. + """Represents a stateless service instance. This includes information about + the identity, status, health, node name, uptime, and other details about + the instance. 
All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. :type last_in_build_duration_in_seconds: str - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. :type instance_id: str """ @@ -23654,22 +22117,19 @@ class StatelessServiceInstanceInfo(ReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceInstanceInfo, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = kwargs.get('instance_id', None) + self.service_kind = 'Stateless' class StatelessServicePartitionInfo(ServicePartitionInfo): @@ -23677,95 +22137,100 @@ class StatelessServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
+ :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service partition. Possible values - include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". - :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, partitioning scheme and - keys supported by it. - :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param instance_count: Number of instances of this partition. :type instance_count: long - :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up - to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted - into the number of nodes on which the instances are allowed to be placed according to the - placement constraints on the service. 
+ :param min_instance_count: MinInstanceCount is the minimum number of + instances that must be up to meet the EnsureAvailability safety check + during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation + -1 is first converted into the number of nodes on which the instances are + allowed to be placed according to the placement constraints on the + service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum percentage of - InstanceCount that must be up to meet the EnsureAvailability safety check during operations - like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first - converted into the number of nodes on which the instances are allowed to be placed according to - the placement constraints on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum + percentage of InstanceCount that must be up to meet the EnsureAvailability + safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage + computation, -1 is first converted into the number of nodes on which the + instances are allowed to be placed according to the placement constraints + on the service. 
:type min_instance_percentage: int """ _validation = { 'service_kind': {'required': True}, - 'min_instance_count': {'minimum': 1}, - 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'long'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServicePartitionInfo, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.instance_count = kwargs.get('instance_count', None) - self.min_instance_count = kwargs.get('min_instance_count', 1) - self.min_instance_percentage = kwargs.get('min_instance_percentage', 0) + self.min_instance_count = kwargs.get('min_instance_count', None) + self.min_instance_percentage = kwargs.get('min_instance_percentage', None) + self.service_kind = 'Stateless' class StatelessServiceTypeDescription(ServiceTypeDescription): - """Describes a stateless service type defined in the service manifest of a provisioned application type. + """Describes a stateless service type defined in the service manifest of a + provisioned application type. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. - Possible values include: "Invalid", "Stateless", "Stateful". - :type kind: str or ~azure.servicefabric.models.ServiceKind - :param is_stateful: Indicates whether the service type is a stateful service type or a - stateless service type. 
This property is true if the service type is a stateful service type, - false otherwise. + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when instantiating this - service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy descriptions. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param use_implicit_host: A flag indicating if this type is not implemented and hosted by a - user service process, but is implicitly hosted by a system created process. This value is true - for services using the guest executable services, false otherwise. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param use_implicit_host: A flag indicating if this type is not + implemented and hosted by a user service process, but is implicitly hosted + by a system created process. This value is true for services using the + guest executable services, false otherwise. :type use_implicit_host: bool """ @@ -23774,23 +22239,20 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'use_implicit_host': {'key': 'UseImplicitHost', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceTypeDescription, self).__init__(**kwargs) - self.kind = 'Stateless' # type: str self.use_implicit_host = kwargs.get('use_implicit_host', None) + self.kind = 'Stateless' class StatelessServiceUpdateDescription(ServiceUpdateDescription): @@ -23798,129 +22260,144 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. 
- This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and - QuorumLossWaitDuration (4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. - * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property - (for Stateful services) or the InstanceCount property (for Stateless services) is set. The - value is 1. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 2. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 4. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 8. - * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. - * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. - * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is - 64. - * Correlation - Indicates the CorrelationScheme property is set. The value is 128. - * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. - * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. - * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 2048. - * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. - * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is - 8192. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 16384. - * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 32768. 
- * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 65536. - * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. - * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. - * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. 
+ - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 2048. + - MinInstanceCount - Indicates the MinInstanceCount property is set. The + value is 4096. + - MinInstancePercentage - Indicates the MinInstancePercentage property is + set. The value is 8192. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 16384. + - InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 32768. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 65536. + - ServiceDnsName - Indicates the ServiceDnsName property is set. The value + is 131072. + - TagsForPlacement - Indicates the TagsForPlacement property is set. The + value is 1048576. + - TagsForRunning - Indicates the TagsForRunning property is set. The value + is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. 
- :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param service_dns_name: The DNS name of the service. :type service_dns_name: str :param tags_for_placement: Tags for placement of this service. :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription :param tags_for_running: Tags for running of this service. :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param instance_count: The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up - to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted - into the number of nodes on which the instances are allowed to be placed according to the - placement constraints on the service. 
+ :param min_instance_count: MinInstanceCount is the minimum number of + instances that must be up to meet the EnsureAvailability safety check + during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation + -1 is first converted into the number of nodes on which the instances are + allowed to be placed according to the placement constraints on the + service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum percentage of - InstanceCount that must be up to meet the EnsureAvailability safety check during operations - like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first - converted into the number of nodes on which the instances are allowed to be placed according to - the placement constraints on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum + percentage of InstanceCount that must be up to meet the EnsureAvailability + safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage + computation, -1 is first converted into the number of nodes on which the + instances are allowed to be placed according to the placement constraints + on the service. :type min_instance_percentage: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless - instance is closed, to allow the active requests to drain gracefully. 
This would be effective - when the instance is closing during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the delay, which prevents - new connections to this instance. + :param instance_close_delay_duration_seconds: Duration in seconds, to wait + before a stateless instance is closed, to allow the active requests to + drain gracefully. This would be effective when the instance is closing + during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the + delay, which prevents new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - .. code-block:: - - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future requests. + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future + requests. :type instance_close_delay_duration_seconds: str - :param instance_lifecycle_description: Defines how instances of this service will behave during - their lifecycle. - :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription - :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer - starts. When it expires Service Fabric will create a new instance on any node in the cluster. - This configuration is to reduce unnecessary creation of a new instance in situations where the - instance going down is likely to recover in a short time. 
For example, during an upgrade. - The default value is 0, which indicates that when stateless instance goes down, Service Fabric - will immediately start building its replacement. + :param instance_lifecycle_description: Defines how instances of this + service will behave during their lifecycle. + :type instance_lifecycle_description: + ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance + goes down, this timer starts. When it expires Service Fabric will create a + new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in + situations where the instance going down is likely to recover in a short + time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes + down, Service Fabric will immediately start building its replacement. :type instance_restart_wait_duration_seconds: str """ _validation = { 'service_kind': {'required': True}, 'instance_count': {'minimum': -1}, - 'min_instance_count': {'minimum': 1}, - 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -23931,6 +22408,7 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 
'MinInstancePercentage', 'type': 'int'}, @@ -23939,54 +22417,48 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StatelessServiceUpdateDescription, self).__init__(**kwargs) - self.service_kind = 'Stateless' # type: str self.instance_count = kwargs.get('instance_count', None) - self.min_instance_count = kwargs.get('min_instance_count', 1) - self.min_instance_percentage = kwargs.get('min_instance_percentage', 0) + self.min_instance_count = kwargs.get('min_instance_count', None) + self.min_instance_percentage = kwargs.get('min_instance_percentage', None) self.instance_close_delay_duration_seconds = kwargs.get('instance_close_delay_duration_seconds', None) self.instance_lifecycle_description = kwargs.get('instance_lifecycle_description', None) self.instance_restart_wait_duration_seconds = kwargs.get('instance_restart_wait_duration_seconds', None) + self.service_kind = 'Stateless' class StoppedChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up. + """Describes a Chaos event that gets generated when Chaos stops because either + the user issued a stop or the time to run was up. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why Chaos stopped. 
Chaos can stop because of StopChaos API call or the - timeToRun provided in ChaosParameters is over. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why Chaos stopped. Chaos can stop because of + StopChaos API call or the timeToRun provided in ChaosParameters is over. :type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StoppedChaosEvent, self).__init__(**kwargs) - self.kind = 'Stopped' # type: str self.reason = kwargs.get('reason', None) + self.kind = 'Stopped' class StringPropertyValue(PropertyValue): @@ -23994,10 +22466,8 @@ class StringPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. + :type kind: str :param data: Required. The data of the property value. 
:type data: str """ @@ -24012,28 +22482,24 @@ class StringPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(StringPropertyValue, self).__init__(**kwargs) - self.kind = 'String' # type: str - self.data = kwargs['data'] + self.data = kwargs.get('data', None) + self.kind = 'String' class SuccessfulPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch succeeding. Contains the results of any "Get" operations in the batch. + """Derived from PropertyBatchInfo. Represents the property batch succeeding. + Contains the results of any "Get" operations in the batch. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch info, determined by the results of a property - batch. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Successful", "Failed". - :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind - :param properties: A map containing the properties that were requested through any "Get" - property batch operations. The key represents the index of the "Get" operation in the original - request, in string form. The value is the property. If a property is not found, it will not be - in the map. + :param kind: Required. Constant filled by server. + :type kind: str + :param properties: A map containing the properties that were requested + through any "Get" property batch operations. The key represents the index + of the "Get" operation in the original request, in string form. The value + is the property. If a property is not found, it will not be in the map. 
:type properties: dict[str, ~azure.servicefabric.models.PropertyInfo] """ @@ -24046,41 +22512,37 @@ class SuccessfulPropertyBatchInfo(PropertyBatchInfo): 'properties': {'key': 'Properties', 'type': '{PropertyInfo}'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SuccessfulPropertyBatchInfo, self).__init__(**kwargs) - self.kind = 'Successful' # type: str self.properties = kwargs.get('properties', None) + self.kind = 'Successful' class SystemApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the fabric:/System application, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state of the cluster is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for the fabric:/System application, containing + information about the data and the algorithm used by health store to + evaluate health. The evaluation is returned only when the aggregated health + state of the cluster is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the system application. The types of the unhealthy evaluations can be - DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param kind: Required. Constant filled by server. + :type kind: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the system application. The types + of the unhealthy evaluations can be DeployedApplicationsHealthEvaluation, + ServicesHealthEvaluation or EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -24088,32 +22550,30 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(SystemApplicationHealthEvaluation, self).__init__(**kwargs) - self.kind = 'SystemApplication' # type: str self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'SystemApplication' -class TcpConfig(msrest.serialization.Model): +class TcpConfig(Model): """Describes the tcp configuration for external connectivity for this network. All required parameters must be populated in order to send to Azure. :param name: Required. tcp gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint below needs to be - exposed. + :param port: Required. Specifies the port at which the service endpoint + below needs to be exposed. :type port: int - :param destination: Required. Describes destination endpoint for routing traffic. + :param destination: Required. Describes destination endpoint for routing + traffic. 
:type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -24129,52 +22589,49 @@ class TcpConfig(msrest.serialization.Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(TcpConfig, self).__init__(**kwargs) - self.name = kwargs['name'] - self.port = kwargs['port'] - self.destination = kwargs['destination'] + self.name = kwargs.get('name', None) + self.port = kwargs.get('port', None) + self.destination = kwargs.get('destination', None) class TestErrorChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when an unexpected event occurs in the Chaos engine. -For example, due to the cluster snapshot being inconsistent, while faulting an entity, Chaos found that the entity was already faulted -- which would be an unexpected event. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why TestErrorChaosEvent was generated. For example, Chaos tries to - fault a partition but finds that the partition is no longer fault tolerant, then a - TestErrorEvent gets generated with the reason stating that the partition is not fault tolerant. + """Describes a Chaos event that gets generated when an unexpected event occurs + in the Chaos engine. + For example, due to the cluster snapshot being inconsistent, while faulting + an entity, Chaos found that the entity was already faulted -- which would + be an unexpected event. + + All required parameters must be populated in order to send to Azure. 
+ + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why TestErrorChaosEvent was generated. For + example, Chaos tries to fault a partition but finds that the partition is + no longer fault tolerant, then a TestErrorEvent gets generated with the + reason stating that the partition is not fault tolerant. :type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(TestErrorChaosEvent, self).__init__(**kwargs) - self.kind = 'TestError' # type: str self.reason = kwargs.get('reason', None) + self.kind = 'TestError' class TimeBasedBackupScheduleDescription(BackupScheduleDescription): @@ -24182,20 +22639,21 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. The kind of backup schedule, time based or frequency - based.Constant filled by server. Possible values include: "Invalid", "TimeBased", - "FrequencyBased". - :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind - :param schedule_frequency_type: Required. Describes the frequency with which to run the time - based backup schedule. Possible values include: "Invalid", "Daily", "Weekly". - :type schedule_frequency_type: str or ~azure.servicefabric.models.BackupScheduleFrequencyType - :param run_days: List of days of a week when to trigger the periodic backup. This is valid only - when the backup schedule frequency type is weekly. 
+ :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str + :param schedule_frequency_type: Required. Describes the frequency with + which to run the time based backup schedule. Possible values include: + 'Invalid', 'Daily', 'Weekly' + :type schedule_frequency_type: str or + ~azure.servicefabric.models.BackupScheduleFrequencyType + :param run_days: List of days of a week when to trigger the periodic + backup. This is valid only when the backup schedule frequency type is + weekly. :type run_days: list[str or ~azure.servicefabric.models.DayOfWeek] - :param run_times: Required. Represents the list of exact time during the day in ISO8601 format. - Like '19:00:00' will represent '7PM' during the day. Date specified along with time will be - ignored. - :type run_times: list[~datetime.datetime] + :param run_times: Required. Represents the list of exact time during the + day in ISO8601 format. Like '19:00:00' will represent '7PM' during the + day. Date specified along with time will be ignored. + :type run_times: list[datetime] """ _validation = { @@ -24211,23 +22669,22 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): 'run_times': {'key': 'RunTimes', 'type': '[iso-8601]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(TimeBasedBackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = 'TimeBased' # type: str - self.schedule_frequency_type = kwargs['schedule_frequency_type'] + self.schedule_frequency_type = kwargs.get('schedule_frequency_type', None) self.run_days = kwargs.get('run_days', None) - self.run_times = kwargs['run_times'] + self.run_times = kwargs.get('run_times', None) + self.schedule_kind = 'TimeBased' -class TimeOfDay(msrest.serialization.Model): +class TimeOfDay(Model): """Defines an hour and minute of the day specified in 24 hour time. - :param hour: Represents the hour of the day. Value must be between 0 and 23 inclusive. 
+ :param hour: Represents the hour of the day. Value must be between 0 and + 23 inclusive. :type hour: int - :param minute: Represents the minute of the hour. Value must be between 0 to 59 inclusive. + :param minute: Represents the minute of the hour. Value must be between 0 + to 59 inclusive. :type minute: int """ @@ -24241,21 +22698,20 @@ class TimeOfDay(msrest.serialization.Model): 'minute': {'key': 'Minute', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(TimeOfDay, self).__init__(**kwargs) self.hour = kwargs.get('hour', None) self.minute = kwargs.get('minute', None) -class TimeRange(msrest.serialization.Model): +class TimeRange(Model): """Defines a time range in a 24 hour day specified by a start and end time. - :param start_time: Defines an hour and minute of the day specified in 24 hour time. + :param start_time: Defines an hour and minute of the day specified in 24 + hour time. :type start_time: ~azure.servicefabric.models.TimeOfDay - :param end_time: Defines an hour and minute of the day specified in 24 hour time. + :param end_time: Defines an hour and minute of the day specified in 24 + hour time. :type end_time: ~azure.servicefabric.models.TimeOfDay """ @@ -24264,29 +22720,28 @@ class TimeRange(msrest.serialization.Model): 'end_time': {'key': 'EndTime', 'type': 'TimeOfDay'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(TimeRange, self).__init__(**kwargs) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): - """Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions. + """Describes a partitioning scheme where an integer range is allocated evenly + across a number of partitions. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. 
Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. + :type partition_scheme: str :param count: Required. The number of partitions. :type count: int - :param low_key: Required. String indicating the lower bound of the partition key range that + :param low_key: Required. String indicating the lower bound of the + partition key range that should be split between the partitions. :type low_key: str - :param high_key: Required. String indicating the upper bound of the partition key range that + :param high_key: Required. String indicating the upper bound of the + partition key range that should be split between the partitions. :type high_key: str """ @@ -24305,25 +22760,23 @@ class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'UniformInt64Range' # type: str - self.count = kwargs['count'] - self.low_key = kwargs['low_key'] - self.high_key = kwargs['high_key'] + self.count = kwargs.get('count', None) + self.low_key = kwargs.get('low_key', None) + self.high_key = kwargs.get('high_key', None) + self.partition_scheme = 'UniformInt64Range' -class UnplacedReplicaInformation(msrest.serialization.Model): +class UnplacedReplicaInformation(Model): """Contains information for an unplaced replica. :param service_name: The name of the service. :type service_name: str :param partition_id: The ID of the partition. :type partition_id: str - :param unplaced_replica_details: List of reasons due to which a replica cannot be placed. 
+ :param unplaced_replica_details: List of reasons due to which a replica + cannot be placed. :type unplaced_replica_details: list[str] """ @@ -24333,28 +22786,27 @@ class UnplacedReplicaInformation(msrest.serialization.Model): 'unplaced_replica_details': {'key': 'UnplacedReplicaDetails', 'type': '[str]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UnplacedReplicaInformation, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.partition_id = kwargs.get('partition_id', None) self.unplaced_replica_details = kwargs.get('unplaced_replica_details', None) -class UnprovisionApplicationTypeDescriptionInfo(msrest.serialization.Model): - """Describes the operation to unregister or unprovision an application type and its version that was registered with the Service Fabric. +class UnprovisionApplicationTypeDescriptionInfo(Model): + """Describes the operation to unregister or unprovision an application type + and its version that was registered with the Service Fabric. All required parameters must be populated in order to send to Azure. - :param application_type_version: Required. The version of the application type as defined in - the application manifest. + :param application_type_version: Required. The version of the application + type as defined in the application manifest. :type application_type_version: str - :param async_property: The flag indicating whether or not unprovision should occur - asynchronously. When set to true, the unprovision operation returns when the request is - accepted by the system, and the unprovision operation continues without any timeout limit. The - default value is false. However, we recommend setting it to true for large application packages + :param async_property: The flag indicating whether or not unprovision + should occur asynchronously. 
When set to true, the unprovision operation + returns when the request is accepted by the system, and the unprovision + operation continues without any timeout limit. The default value is false. + However, we recommend setting it to true for large application packages that were provisioned. :type async_property: bool """ @@ -24368,16 +22820,13 @@ class UnprovisionApplicationTypeDescriptionInfo(msrest.serialization.Model): 'async_property': {'key': 'Async', 'type': 'bool'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UnprovisionApplicationTypeDescriptionInfo, self).__init__(**kwargs) - self.application_type_version = kwargs['application_type_version'] + self.application_type_version = kwargs.get('application_type_version', None) self.async_property = kwargs.get('async_property', None) -class UnprovisionFabricDescription(msrest.serialization.Model): +class UnprovisionFabricDescription(Model): """Describes the parameters for unprovisioning a cluster. :param code_version: The cluster code package version. @@ -24391,37 +22840,40 @@ class UnprovisionFabricDescription(msrest.serialization.Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UnprovisionFabricDescription, self).__init__(**kwargs) self.code_version = kwargs.get('code_version', None) self.config_version = kwargs.get('config_version', None) -class UpdateClusterUpgradeDescription(msrest.serialization.Model): +class UpdateClusterUpgradeDescription(Model): """Parameters for updating a cluster upgrade. - :param upgrade_kind: The type of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling", "Rolling_ForceRestart". Default value: "Rolling". + :param upgrade_kind: The type of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling', + 'Rolling_ForceRestart'. Default value: "Rolling" . 
:type upgrade_kind: str or ~azure.servicefabric.models.UpgradeType - :param update_description: Describes the parameters for updating a rolling upgrade of - application or cluster. - :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than - absolute health evaluation after completion of each upgrade domain. + :param update_description: Describes the parameters for updating a rolling + upgrade of application or cluster. + :type update_description: + ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of - the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health policy map used to - evaluate the health of an application or one of its children entities. 
- :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policy_map: Defines the application health + policy map used to evaluate the health of an application or one of its + children entities. + :type application_health_policy_map: + ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -24433,10 +22885,7 @@ class UpdateClusterUpgradeDescription(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpdateClusterUpgradeDescription, self).__init__(**kwargs) self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") self.update_description = kwargs.get('update_description', None) @@ -24446,13 +22895,15 @@ def __init__( self.application_health_policy_map = kwargs.get('application_health_policy_map', None) -class UpdatePartitionLoadResult(msrest.serialization.Model): - """Specifies result of updating load for specified partitions. The output will be ordered based on the partition ID. +class UpdatePartitionLoadResult(Model): + """Specifies result of updating load for specified partitions. The output will + be ordered based on the partition ID. :param partition_id: Id of the partition. :type partition_id: str - :param partition_error_code: If OperationState is Completed - this is 0. If OperationState is - Faulted - this is an error code indicating the reason. + :param partition_error_code: If OperationState is Completed - this is 0. + If OperationState is Faulted - this is an error code indicating the + reason. 
:type partition_error_code: int """ @@ -24461,55 +22912,53 @@ class UpdatePartitionLoadResult(msrest.serialization.Model): 'partition_error_code': {'key': 'PartitionErrorCode', 'type': 'int'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpdatePartitionLoadResult, self).__init__(**kwargs) self.partition_id = kwargs.get('partition_id', None) self.partition_error_code = kwargs.get('partition_error_code', None) class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta unhealthy cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. -Can be returned during cluster upgrade when cluster aggregated health state is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for delta unhealthy cluster nodes in an + upgrade domain, containing health evaluations for each unhealthy node that + impacted current aggregated health state. + Can be returned during cluster upgrade when cluster aggregated health state + is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently - evaluated. + :param kind: Required. Constant filled by server. + :type kind: str + :param upgrade_domain_name: Name of the upgrade domain where nodes health + is currently evaluated. :type upgrade_domain_name: str - :param baseline_error_count: Number of upgrade domain nodes with aggregated heath state Error - in the health store at the beginning of the cluster upgrade. + :param baseline_error_count: Number of upgrade domain nodes with + aggregated heath state Error in the health store at the beginning of the + cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of upgrade domain nodes in the health store at the - beginning of the cluster upgrade. + :param baseline_total_count: Total number of upgrade domain nodes in the + health store at the beginning of the cluster upgrade. 
:type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of upgrade domain delta - unhealthy nodes from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of + upgrade domain delta unhealthy nodes from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int - :param total_count: Total number of upgrade domain nodes in the health store. + :param total_count: Total number of upgrade domain nodes in the health + store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -24517,9 +22966,9 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, @@ -24528,27 +22977,24 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpgradeDomainDeltaNodesCheckHealthEvaluation, self).__init__(**kwargs) - self.kind = 'UpgradeDomainDeltaNodesCheck' # type: str self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) self.baseline_error_count = kwargs.get('baseline_error_count', None) self.baseline_total_count = kwargs.get('baseline_total_count', None) self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'UpgradeDomainDeltaNodesCheck' -class UpgradeDomainInfo(msrest.serialization.Model): +class UpgradeDomainInfo(Model): """Information about an upgrade domain. - :param name: The name of the upgrade domain. + :param name: The name of the upgrade domain :type name: str - :param state: The state of the upgrade domain. Possible values include: "Invalid", "Pending", - "InProgress", "Completed". + :param state: The state of the upgrade domain. 
Possible values include: + 'Invalid', 'Pending', 'InProgress', 'Completed' :type state: str or ~azure.servicefabric.models.UpgradeDomainState """ @@ -24557,48 +23003,45 @@ class UpgradeDomainInfo(msrest.serialization.Model): 'state': {'key': 'State', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpgradeDomainInfo, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.state = kwargs.get('state', None) class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health during cluster upgrade and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for cluster nodes in an upgrade domain, + containing health evaluations for each unhealthy node that impacted current + aggregated health state. Can be returned when evaluating cluster health + during cluster upgrade and the aggregated health state is either Error or + Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently - evaluated. + :param kind: Required. Constant filled by server. + :type kind: str + :param upgrade_domain_name: Name of the upgrade domain where nodes health + is currently evaluated. :type upgrade_domain_name: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the - ClusterHealthPolicy. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes from the ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes in the current upgrade domain. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -24606,31 +23049,29 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpgradeDomainNodesHealthEvaluation, self).__init__(**kwargs) - self.kind = 'UpgradeDomainNodes' # type: str self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) self.total_count = kwargs.get('total_count', None) self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.kind = 'UpgradeDomainNodes' -class UpgradeOrchestrationServiceState(msrest.serialization.Model): +class UpgradeOrchestrationServiceState(Model): """Service state of Service Fabric Upgrade Orchestration Service. - :param service_state: The state of Service Fabric Upgrade Orchestration Service. + :param service_state: The state of Service Fabric Upgrade Orchestration + Service. 
:type service_state: str """ @@ -24638,26 +23079,26 @@ class UpgradeOrchestrationServiceState(msrest.serialization.Model): 'service_state': {'key': 'ServiceState', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpgradeOrchestrationServiceState, self).__init__(**kwargs) self.service_state = kwargs.get('service_state', None) -class UpgradeOrchestrationServiceStateSummary(msrest.serialization.Model): +class UpgradeOrchestrationServiceStateSummary(Model): """Service state summary of Service Fabric Upgrade Orchestration Service. :param current_code_version: The current code version of the cluster. :type current_code_version: str - :param current_manifest_version: The current manifest version of the cluster. + :param current_manifest_version: The current manifest version of the + cluster. :type current_manifest_version: str :param target_code_version: The target code version of the cluster. :type target_code_version: str - :param target_manifest_version: The target manifest version of the cluster. + :param target_manifest_version: The target manifest version of the + cluster. :type target_manifest_version: str - :param pending_upgrade_type: The type of the pending upgrade of the cluster. + :param pending_upgrade_type: The type of the pending upgrade of the + cluster. 
:type pending_upgrade_type: str """ @@ -24669,10 +23110,7 @@ class UpgradeOrchestrationServiceStateSummary(msrest.serialization.Model): 'pending_upgrade_type': {'key': 'PendingUpgradeType', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UpgradeOrchestrationServiceStateSummary, self).__init__(**kwargs) self.current_code_version = kwargs.get('current_code_version', None) self.current_manifest_version = kwargs.get('current_manifest_version', None) @@ -24681,14 +23119,14 @@ def __init__( self.pending_upgrade_type = kwargs.get('pending_upgrade_type', None) -class UploadChunkRange(msrest.serialization.Model): +class UploadChunkRange(Model): """Information about which portion of the file to upload. - :param start_position: The start position of the portion of the file. It's represented by the - number of bytes. + :param start_position: The start position of the portion of the file. It's + represented by the number of bytes. :type start_position: str - :param end_position: The end position of the portion of the file. It's represented by the - number of bytes. + :param end_position: The end position of the portion of the file. It's + represented by the number of bytes. :type end_position: str """ @@ -24697,21 +23135,19 @@ class UploadChunkRange(msrest.serialization.Model): 'end_position': {'key': 'EndPosition', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UploadChunkRange, self).__init__(**kwargs) self.start_position = kwargs.get('start_position', None) self.end_position = kwargs.get('end_position', None) -class UploadSession(msrest.serialization.Model): +class UploadSession(Model): """Information about a image store upload session. - :param upload_sessions: When querying upload session by upload session ID, the result contains - only one upload session. When querying upload session by image store relative path, the result - might contain multiple upload sessions. 
+ :param upload_sessions: When querying upload session by upload session ID, + the result contains only one upload session. When querying upload session + by image store relative path, the result might contain multiple upload + sessions. :type upload_sessions: list[~azure.servicefabric.models.UploadSessionInfo] """ @@ -24719,28 +23155,28 @@ class UploadSession(msrest.serialization.Model): 'upload_sessions': {'key': 'UploadSessions', 'type': '[UploadSessionInfo]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UploadSession, self).__init__(**kwargs) self.upload_sessions = kwargs.get('upload_sessions', None) -class UploadSessionInfo(msrest.serialization.Model): - """Information about an image store upload session. A session is associated with a relative path in the image store. +class UploadSessionInfo(Model): + """Information about an image store upload session. A session is associated + with a relative path in the image store. - :param store_relative_path: The remote location within image store. This path is relative to - the image store root. + :param store_relative_path: The remote location within image store. This + path is relative to the image store root. :type store_relative_path: str - :param session_id: A unique ID of the upload session. A session ID can be reused only if the - session was committed or removed. + :param session_id: A unique ID of the upload session. A session ID can be + reused only if the session was committed or removed. :type session_id: str - :param modified_date: The date and time when the upload session was last modified. - :type modified_date: ~datetime.datetime + :param modified_date: The date and time when the upload session was last + modified. + :type modified_date: datetime :param file_size: The size in bytes of the uploading file. :type file_size: str - :param expected_ranges: List of chunk ranges that image store has not received yet. 
+ :param expected_ranges: List of chunk ranges that image store has not + received yet. :type expected_ranges: list[~azure.servicefabric.models.UploadChunkRange] """ @@ -24752,10 +23188,7 @@ class UploadSessionInfo(msrest.serialization.Model): 'expected_ranges': {'key': 'ExpectedRanges', 'type': '[UploadChunkRange]'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UploadSessionInfo, self).__init__(**kwargs) self.store_relative_path = kwargs.get('store_relative_path', None) self.session_id = kwargs.get('session_id', None) @@ -24764,12 +23197,13 @@ def __init__( self.expected_ranges = kwargs.get('expected_ranges', None) -class UsageInfo(msrest.serialization.Model): - """Information about how much space and how many files in the file system the ImageStore is using in this category. +class UsageInfo(Model): + """Information about how much space and how many files in the file system the + ImageStore is using in this category. - :param used_space: the size of all files in this category. + :param used_space: the size of all files in this category :type used_space: str - :param file_count: the number of all files in this category. + :param file_count: the number of all files in this category :type file_count: str """ @@ -24778,10 +23212,7 @@ class UsageInfo(msrest.serialization.Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(UsageInfo, self).__init__(**kwargs) self.used_space = kwargs.get('used_space', None) self.file_count = kwargs.get('file_count', None) @@ -24792,50 +23223,48 @@ class ValidationFailedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". 
- :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why the ValidationFailedChaosEvent was generated. This may happen - because more than MaxPercentUnhealthyNodes are unhealthy for more than - MaxClusterStabilizationTimeout. This reason will be in the Reason property of the - ValidationFailedChaosEvent as a string. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why the ValidationFailedChaosEvent was generated. + This may happen because more than MaxPercentUnhealthyNodes are unhealthy + for more than MaxClusterStabilizationTimeout. This reason will be in the + Reason property of the ValidationFailedChaosEvent as a string. :type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(ValidationFailedChaosEvent, self).__init__(**kwargs) - self.kind = 'ValidationFailed' # type: str self.reason = kwargs.get('reason', None) + self.kind = 'ValidationFailed' -class VolumeProviderParametersAzureFile(msrest.serialization.Model): +class VolumeProviderParametersAzureFile(Model): """This type describes a volume provided by an Azure Files file share. All required parameters must be populated in order to send to Azure. - :param account_name: Required. Name of the Azure storage account for the File Share. + :param account_name: Required. 
Name of the Azure storage account for the + File Share. :type account_name: str - :param account_key: Access key of the Azure storage account for the File Share. + :param account_key: Access key of the Azure storage account for the File + Share. :type account_key: str - :param share_name: Required. Name of the Azure Files file share that provides storage for the - volume. + :param share_name: Required. Name of the Azure Files file share that + provides storage for the volume. :type share_name: str """ @@ -24850,20 +23279,18 @@ class VolumeProviderParametersAzureFile(msrest.serialization.Model): 'share_name': {'key': 'shareName', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(VolumeProviderParametersAzureFile, self).__init__(**kwargs) - self.account_name = kwargs['account_name'] + self.account_name = kwargs.get('account_name', None) self.account_key = kwargs.get('account_key', None) - self.share_name = kwargs['share_name'] + self.share_name = kwargs.get('share_name', None) -class VolumeResourceDescription(msrest.serialization.Model): +class VolumeResourceDescription(Model): """This type describes a volume resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. @@ -24871,23 +23298,26 @@ class VolumeResourceDescription(msrest.serialization.Model): :type name: str :param description: User readable description of the volume. :type description: str - :ivar status: Status of the volume. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the volume. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the volume. + :ivar status_details: Gives additional information about the current + status of the volume. :vartype status_details: str - :param provider: Required. Provider of the volume. Possible values include: "SFAzureFile". - :type provider: str or ~azure.servicefabric.models.VolumeProvider - :param azure_file_parameters: This type describes a volume provided by an Azure Files file - share. - :type azure_file_parameters: ~azure.servicefabric.models.VolumeProviderParametersAzureFile + :ivar provider: Required. Provider of the volume. Default value: + "SFAzureFile" . + :vartype provider: str + :param azure_file_parameters: This type describes a volume provided by an + Azure Files file share. + :type azure_file_parameters: + ~azure.servicefabric.models.VolumeProviderParametersAzureFile """ _validation = { 'name': {'required': True}, 'status': {'readonly': True}, 'status_details': {'readonly': True}, - 'provider': {'required': True}, + 'provider': {'required': True, 'constant': True}, } _attribute_map = { @@ -24899,31 +23329,29 @@ class VolumeResourceDescription(msrest.serialization.Model): 'azure_file_parameters': {'key': 'properties.azureFileParameters', 'type': 'VolumeProviderParametersAzureFile'}, } - def __init__( - self, - **kwargs - ): + provider = "SFAzureFile" + + def __init__(self, **kwargs): super(VolumeResourceDescription, self).__init__(**kwargs) - self.name = kwargs['name'] + self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) self.status = None self.status_details = None - self.provider = kwargs['provider'] self.azure_file_parameters = kwargs.get('azure_file_parameters', None) class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the 
replica build operation to finish. This indicates that there is a replica that is going through the copy or is providing data for building another replica. Bring the node down will abort this copy operation which are typically expensive involving data movements. + """Safety check that waits for the replica build operation to finish. This + indicates that there is a replica that is going through the copy or is + providing data for building another replica. Bring the node down will abort + this copy operation which are typically expensive involving data movements. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
:type partition_id: str """ @@ -24936,26 +23364,21 @@ class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(WaitForInbuildReplicaSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForInbuildReplica' # type: str + self.kind = 'WaitForInbuildReplica' class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica that was moved out of the node due to upgrade to be placed back again on that node. + """Safety check that waits for the primary replica that was moved out of the + node due to upgrade to be placed back again on that node. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
:type partition_id: str """ @@ -24968,26 +23391,22 @@ class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(WaitForPrimaryPlacementSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForPrimaryPlacement' # type: str + self.kind = 'WaitForPrimaryPlacement' class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica to be moved out of the node before starting an upgrade to ensure the availability of the primary replica for the partition. + """Safety check that waits for the primary replica to be moved out of the node + before starting an upgrade to ensure the availability of the primary + replica for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
:type partition_id: str """ @@ -25000,26 +23419,21 @@ class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(WaitForPrimarySwapSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForPrimarySwap' # type: str + self.kind = 'WaitForPrimarySwap' class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the current reconfiguration of the partition to be completed before starting an upgrade. + """Safety check that waits for the current reconfiguration of the partition to + be completed before starting an upgrade. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
:type partition_id: str """ @@ -25032,45 +23446,40 @@ class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(WaitForReconfigurationSafetyCheck, self).__init__(**kwargs) - self.kind = 'WaitForReconfiguration' # type: str + self.kind = 'WaitForReconfiguration' class WaitingChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. + """Describes a Chaos event that gets generated when Chaos is waiting for the + cluster to become ready for faulting, for example, Chaos may be waiting for + the on-going upgrade to finish. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why the WaitingChaosEvent was generated, for example, due to a cluster - upgrade. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why the WaitingChaosEvent was generated, for + example, due to a cluster upgrade. 
:type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs): super(WaitingChaosEvent, self).__init__(**kwargs) - self.kind = 'Waiting' # type: str self.reason = kwargs.get('reason', None) + self.kind = 'Waiting' diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py index 9ea894eb0833..2438d8644c9f 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_models_py3.py @@ -1,21 +1,19 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
# -------------------------------------------------------------------------- -import datetime -from typing import Dict, List, Optional, Union +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError -from azure.core.exceptions import HttpResponseError -import msrest.serialization -from ._service_fabric_client_apis_enums import * - - -class AadMetadata(msrest.serialization.Model): +class AadMetadata(Model): """Azure Active Directory metadata used for secured connection to cluster. :param authority: The AAD authority url. @@ -41,17 +39,7 @@ class AadMetadata(msrest.serialization.Model): 'tenant': {'key': 'tenant', 'type': 'str'}, } - def __init__( - self, - *, - authority: Optional[str] = None, - client: Optional[str] = None, - cluster: Optional[str] = None, - login: Optional[str] = None, - redirect: Optional[str] = None, - tenant: Optional[str] = None, - **kwargs - ): + def __init__(self, *, authority: str=None, client: str=None, cluster: str=None, login: str=None, redirect: str=None, tenant: str=None, **kwargs) -> None: super(AadMetadata, self).__init__(**kwargs) self.authority = authority self.client = client @@ -61,12 +49,14 @@ def __init__( self.tenant = tenant -class AadMetadataObject(msrest.serialization.Model): - """Azure Active Directory metadata object used for secured connection to cluster. +class AadMetadataObject(Model): + """Azure Active Directory metadata object used for secured connection to + cluster. :param type: The client authentication method. :type type: str - :param metadata: Azure Active Directory metadata used for secured connection to cluster. + :param metadata: Azure Active Directory metadata used for secured + connection to cluster. 
:type metadata: ~azure.servicefabric.models.AadMetadata """ @@ -75,30 +65,23 @@ class AadMetadataObject(msrest.serialization.Model): 'metadata': {'key': 'metadata', 'type': 'AadMetadata'}, } - def __init__( - self, - *, - type: Optional[str] = None, - metadata: Optional["AadMetadata"] = None, - **kwargs - ): + def __init__(self, *, type: str=None, metadata=None, **kwargs) -> None: super(AadMetadataObject, self).__init__(**kwargs) self.type = type self.metadata = metadata -class ScalingMechanismDescription(msrest.serialization.Model): +class ScalingMechanismDescription(Model): """Describes the mechanism for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AddRemoveIncrementalNamedPartitionScalingMechanism, PartitionInstanceCountScaleMechanism. + sub-classes are: PartitionInstanceCountScaleMechanism, + AddRemoveIncrementalNamedPartitionScalingMechanism All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. - Possible values include: "Invalid", "PartitionInstanceCount", - "AddRemoveIncrementalNamedPartition". - :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -110,32 +93,30 @@ class ScalingMechanismDescription(msrest.serialization.Model): } _subtype_map = { - 'kind': {'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism', 'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism'} + 'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ScalingMechanismDescription, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing named partitions of a stateless service. Partition names are in the format '0','1''N-1'. + """Represents a scaling mechanism for adding or removing named partitions of a + stateless service. Partition names are in the format '0','1''N-1'. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. - Possible values include: "Invalid", "PartitionInstanceCount", - "AddRemoveIncrementalNamedPartition". - :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind - :param min_partition_count: Required. Minimum number of named partitions of the service. + :param kind: Required. Constant filled by server. + :type kind: str + :param min_partition_count: Required. Minimum number of named partitions + of the service. :type min_partition_count: int - :param max_partition_count: Required. Maximum number of named partitions of the service. + :param max_partition_count: Required. Maximum number of named partitions + of the service. :type max_partition_count: int - :param scale_increment: Required. 
The number of instances to add or remove during a scaling - operation. + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. :type scale_increment: int """ @@ -153,32 +134,25 @@ class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescrip 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__( - self, - *, - min_partition_count: int, - max_partition_count: int, - scale_increment: int, - **kwargs - ): + def __init__(self, *, min_partition_count: int, max_partition_count: int, scale_increment: int, **kwargs) -> None: super(AddRemoveIncrementalNamedPartitionScalingMechanism, self).__init__(**kwargs) - self.kind = 'AddRemoveIncrementalNamedPartition' # type: str self.min_partition_count = min_partition_count self.max_partition_count = max_partition_count self.scale_increment = scale_increment + self.kind = 'AddRemoveIncrementalNamedPartition' -class AutoScalingMechanism(msrest.serialization.Model): - """Describes the mechanism for performing auto scaling operation. Derived classes will describe the actual mechanism. +class AutoScalingMechanism(Model): + """Describes the mechanism for performing auto scaling operation. Derived + classes will describe the actual mechanism. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AddRemoveReplicaScalingMechanism. + sub-classes are: AddRemoveReplicaScalingMechanism All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible - values include: "AddRemoveReplica". - :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -193,30 +167,27 @@ class AutoScalingMechanism(msrest.serialization.Model): 'kind': {'AddRemoveReplica': 'AddRemoveReplicaScalingMechanism'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(AutoScalingMechanism, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): - """Describes the horizontal auto scaling mechanism that adds or removes replicas (containers or container groups). + """Describes the horizontal auto scaling mechanism that adds or removes + replicas (containers or container groups). All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling mechanism.Constant filled by server. Possible - values include: "AddRemoveReplica". - :type kind: str or ~azure.servicefabric.models.AutoScalingMechanismKind - :param min_count: Required. Minimum number of containers (scale down won't be performed below - this number). + :param kind: Required. Constant filled by server. + :type kind: str + :param min_count: Required. Minimum number of containers (scale down won't + be performed below this number). :type min_count: int - :param max_count: Required. Maximum number of containers (scale up won't be performed above - this number). + :param max_count: Required. Maximum number of containers (scale up won't + be performed above this number). :type max_count: int - :param scale_increment: Required. Each time auto scaling is performed, this number of - containers will be added or removed. + :param scale_increment: Required. Each time auto scaling is performed, + this number of containers will be added or removed. 
:type scale_increment: int """ @@ -234,28 +205,21 @@ class AddRemoveReplicaScalingMechanism(AutoScalingMechanism): 'scale_increment': {'key': 'scaleIncrement', 'type': 'int'}, } - def __init__( - self, - *, - min_count: int, - max_count: int, - scale_increment: int, - **kwargs - ): + def __init__(self, *, min_count: int, max_count: int, scale_increment: int, **kwargs) -> None: super(AddRemoveReplicaScalingMechanism, self).__init__(**kwargs) - self.kind = 'AddRemoveReplica' # type: str self.min_count = min_count self.max_count = max_count self.scale_increment = scale_increment + self.kind = 'AddRemoveReplica' -class AnalysisEventMetadata(msrest.serialization.Model): +class AnalysisEventMetadata(Model): """Metadata about an Analysis Event. :param delay: The analysis delay. - :type delay: ~datetime.timedelta + :type delay: timedelta :param duration: The duration of analysis. - :type duration: ~datetime.timedelta + :type duration: timedelta """ _attribute_map = { @@ -263,38 +227,33 @@ class AnalysisEventMetadata(msrest.serialization.Model): 'duration': {'key': 'Duration', 'type': 'duration'}, } - def __init__( - self, - *, - delay: Optional[datetime.timedelta] = None, - duration: Optional[datetime.timedelta] = None, - **kwargs - ): + def __init__(self, *, delay=None, duration=None, **kwargs) -> None: super(AnalysisEventMetadata, self).__init__(**kwargs) self.delay = delay self.duration = duration -class BackupConfigurationInfo(msrest.serialization.Model): +class BackupConfigurationInfo(Model): """Describes the backup configuration information. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupConfigurationInfo, PartitionBackupConfigurationInfo, ServiceBackupConfigurationInfo. + sub-classes are: ApplicationBackupConfigurationInfo, + ServiceBackupConfigurationInfo, PartitionBackupConfigurationInfo All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". - :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -302,49 +261,45 @@ class BackupConfigurationInfo(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo'} + 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo'} } - def __init__( - self, - *, - policy_name: Optional[str] = None, - policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, - suspension_info: Optional["BackupSuspensionInfo"] = None, - **kwargs - ): + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, **kwargs) -> None: super(BackupConfigurationInfo, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.policy_name = policy_name self.policy_inherited_from = policy_inherited_from self.suspension_info = suspension_info + self.kind = None class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric application specifying what backup policy is being applied and suspend description, if any. + """Backup configuration information for a specific Service Fabric application + specifying what backup policy is being applied and suspend description, if + any. All required parameters must be populated in order to send to Azure. - :param kind: Required. The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. 
Possible - values include: "Invalid", "Partition", "Service", "Application". - :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
:type application_name: str """ @@ -353,39 +308,30 @@ class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__( - self, - *, - policy_name: Optional[str] = None, - policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, - suspension_info: Optional["BackupSuspensionInfo"] = None, - application_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, application_name: str=None, **kwargs) -> None: super(ApplicationBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) - self.kind = 'Application' # type: str self.application_name = application_name + self.kind = 'Application' -class BackupEntity(msrest.serialization.Model): +class BackupEntity(Model): """Describes the Service Fabric entity that is configured for backup. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationBackupEntity, PartitionBackupEntity, ServiceBackupEntity. + sub-classes are: ApplicationBackupEntity, ServiceBackupEntity, + PartitionBackupEntity All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". 
- :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str """ _validation = { @@ -397,15 +343,12 @@ class BackupEntity(msrest.serialization.Model): } _subtype_map = { - 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Partition': 'PartitionBackupEntity', 'Service': 'ServiceBackupEntity'} + 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(BackupEntity, self).__init__(**kwargs) - self.entity_kind = None # type: Optional[str] + self.entity_kind = None class ApplicationBackupEntity(BackupEntity): @@ -413,11 +356,10 @@ class ApplicationBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
:type application_name: str """ @@ -430,37 +372,38 @@ class ApplicationBackupEntity(BackupEntity): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__( - self, - *, - application_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, application_name: str=None, **kwargs) -> None: super(ApplicationBackupEntity, self).__init__(**kwargs) - self.entity_kind = 'Application' # type: str self.application_name = application_name - - -class ApplicationCapacityDescription(msrest.serialization.Model): - """Describes capacity information for services of this application. This description can be used for describing the following. - - -* Reserving the capacity for the services on the nodes -* Limiting the total number of nodes that services of this application can run on -* Limiting the custom capacity metrics to limit the total consumption of this metric by the services of this application. - - :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity - for this application. Note that this does not mean that the services of this application will - be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. - The value of this property cannot be more than the value of the MaximumNodes property. + self.entity_kind = 'Application' + + +class ApplicationCapacityDescription(Model): + """Describes capacity information for services of this application. This + description can be used for describing the following. + - Reserving the capacity for the services on the nodes + - Limiting the total number of nodes that services of this application can + run on + - Limiting the custom capacity metrics to limit the total consumption of + this metric by the services of this application. + + :param minimum_nodes: The minimum number of nodes where Service Fabric + will reserve capacity for this application. 
Note that this does not mean + that the services of this application will be placed on all of those + nodes. If this property is set to zero, no capacity will be reserved. The + value of this property cannot be more than the value of the MaximumNodes + property. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity - for this application. Note that this does not mean that the services of this application will - be placed on all of those nodes. By default, the value of this property is zero and it means - that the services can be placed on any node. + :param maximum_nodes: The maximum number of nodes where Service Fabric + will reserve capacity for this application. Note that this does not mean + that the services of this application will be placed on all of those + nodes. By default, the value of this property is zero and it means that + the services can be placed on any node. Default value: 0 . :type maximum_nodes: long - :param application_metrics: List of application capacity metric description. - :type application_metrics: list[~azure.servicefabric.models.ApplicationMetricDescription] + :param application_metrics: List of application capacity metric + description. 
+ :type application_metrics: + list[~azure.servicefabric.models.ApplicationMetricDescription] """ _validation = { @@ -474,179 +417,127 @@ class ApplicationCapacityDescription(msrest.serialization.Model): 'application_metrics': {'key': 'ApplicationMetrics', 'type': '[ApplicationMetricDescription]'}, } - def __init__( - self, - *, - minimum_nodes: Optional[int] = None, - maximum_nodes: Optional[int] = 0, - application_metrics: Optional[List["ApplicationMetricDescription"]] = None, - **kwargs - ): + def __init__(self, *, minimum_nodes: int=None, maximum_nodes: int=0, application_metrics=None, **kwargs) -> None: super(ApplicationCapacityDescription, self).__init__(**kwargs) self.minimum_nodes = minimum_nodes self.maximum_nodes = maximum_nodes self.application_metrics = application_metrics -class FabricEvent(msrest.serialization.Model): +class FabricEvent(Model): """Represents the base for all Fabric Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, + NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ApplicationEvent': 'ApplicationEvent', 'ClusterEvent': 'ClusterEvent', 'ContainerInstanceEvent': 'ContainerInstanceEvent', 'NodeEvent': 'NodeEvent', 'PartitionEvent': 'PartitionEvent', 'ReplicaEvent': 'ReplicaEvent', 'ServiceEvent': 'ServiceEvent'} } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(FabricEvent, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.event_instance_id = 
event_instance_id self.category = category self.time_stamp = time_stamp self.has_correlated_events = has_correlated_events + self.kind = None class ApplicationEvent(FabricEvent): """Represents the base for all Application Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationContainerInstanceExitedEvent, ApplicationCreatedEvent, ApplicationDeletedEvent, ApplicationHealthReportExpiredEvent, ApplicationNewHealthReportEvent, ApplicationProcessExitedEvent, ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, ApplicationUpgradeRollbackCompletedEvent, ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, ChaosCodePackageRestartScheduledEvent, DeployedApplicationHealthReportExpiredEvent, DeployedApplicationNewHealthReportEvent, DeployedServicePackageHealthReportExpiredEvent, DeployedServicePackageNewHealthReportEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: ApplicationCreatedEvent, ApplicationDeletedEvent, + ApplicationNewHealthReportEvent, ApplicationHealthReportExpiredEvent, + ApplicationUpgradeCompletedEvent, ApplicationUpgradeDomainCompletedEvent, + ApplicationUpgradeRollbackCompletedEvent, + ApplicationUpgradeRollbackStartedEvent, ApplicationUpgradeStartedEvent, + DeployedApplicationNewHealthReportEvent, + DeployedApplicationHealthReportExpiredEvent, ApplicationProcessExitedEvent, + ApplicationContainerInstanceExitedEvent, + DeployedServicePackageNewHealthReportEvent, + DeployedServicePackageHealthReportExpiredEvent, + ChaosCodePackageRestartScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 
'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent'} - } - - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + 'kind': {'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationNewHealthReport': 'ApplicationNewHealthReportEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationUpgradeCompleted': 'ApplicationUpgradeCompletedEvent', 'ApplicationUpgradeDomainCompleted': 'ApplicationUpgradeDomainCompletedEvent', 'ApplicationUpgradeRollbackCompleted': 'ApplicationUpgradeRollbackCompletedEvent', 'ApplicationUpgradeRollbackStarted': 'ApplicationUpgradeRollbackStartedEvent', 'ApplicationUpgradeStarted': 'ApplicationUpgradeStartedEvent', 'DeployedApplicationNewHealthReport': 'DeployedApplicationNewHealthReportEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'ApplicationProcessExited': 'ApplicationProcessExitedEvent', 'ApplicationContainerInstanceExited': 'ApplicationContainerInstanceExitedEvent', 'DeployedServicePackageNewHealthReport': 'DeployedServicePackageNewHealthReportEvent', 'DeployedServicePackageHealthReportExpired': 'DeployedServicePackageHealthReportExpiredEvent', 'ChaosCodePackageRestartScheduled': 'ChaosCodePackageRestartScheduledEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ApplicationEvent' # type: str self.application_id = application_id + self.kind = 'ApplicationEvent' class 
ApplicationContainerInstanceExitedEvent(ApplicationEvent): @@ -654,50 +545,32 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", 
"ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service package. 
+ :param service_package_activation_id: Required. Activation Id of Service + package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. :type is_exclusive: bool @@ -713,16 +586,17 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. :type exit_code: long - :param unexpected_termination: Required. Indicates if termination is unexpected. + :param unexpected_termination: Required. Indicates if termination is + unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: ~datetime.datetime + :type start_time: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -739,11 +613,11 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -759,30 +633,8 @@ class ApplicationContainerInstanceExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - service_name: str, - service_package_name: str, - service_package_activation_id: str, - is_exclusive: bool, - code_package_name: str, - 
entry_point_type: str, - image_name: str, - container_name: str, - host_id: str, - exit_code: int, - unexpected_termination: bool, - start_time: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_name: str, service_package_name: str, service_package_activation_id: str, is_exclusive: bool, code_package_name: str, entry_point_type: str, image_name: str, container_name: str, host_id: str, exit_code: int, unexpected_termination: bool, start_time, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationContainerInstanceExitedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationContainerInstanceExited' # type: str self.service_name = service_name self.service_package_name = service_package_name self.service_package_activation_id = service_package_activation_id @@ -795,6 +647,7 @@ def __init__( self.exit_code = exit_code self.unexpected_termination = unexpected_termination self.start_time = start_time + self.kind = 'ApplicationContainerInstanceExited' class ApplicationCreatedEvent(ApplicationEvent): @@ -802,44 +655,25 @@ class ApplicationCreatedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -850,9 +684,9 @@ class ApplicationCreatedEvent(ApplicationEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -860,35 +694,23 @@ class ApplicationCreatedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - application_type_version: str, - application_definition_kind: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, application_definition_kind: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationCreatedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationCreated' # type: str self.application_type_name = application_type_name self.application_type_version = 
application_type_version self.application_definition_kind = application_definition_kind + self.kind = 'ApplicationCreated' class ApplicationDeletedEvent(ApplicationEvent): @@ -896,44 +718,25 @@ class ApplicationDeletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", 
"ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. 
:type application_type_name: str @@ -942,68 +745,62 @@ class ApplicationDeletedEvent(ApplicationEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - application_type_version: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationDeletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationDeleted' # type: str self.application_type_name = application_type_name self.application_type_version = application_type_version + self.kind = 'ApplicationDeleted' -class ApplicationDescription(msrest.serialization.Model): +class ApplicationDescription(Model): """Describes a Service Fabric application. 
All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the 'fabric:' URI scheme. + :param name: Required. The name of the application, including the + 'fabric:' URI scheme. :type name: str - :param type_name: Required. The application type name as defined in the application manifest. - :type type_name: str - :param type_version: Required. The version of the application type as defined in the + :param type_name: Required. The application type name as defined in the application manifest. + :type type_name: str + :param type_version: Required. The version of the application type as + defined in the application manifest. :type type_version: str - :param parameter_list: List of application parameters with overridden values from their default - values specified in the application manifest. - :type parameter_list: list[~azure.servicefabric.models.ApplicationParameter] - :param application_capacity: Describes capacity information for services of this application. - This description can be used for describing the following. - - - * Reserving the capacity for the services on the nodes - * Limiting the total number of nodes that services of this application can run on - * Limiting the custom capacity metrics to limit the total consumption of this metric by the - services of this application. - :type application_capacity: ~azure.servicefabric.models.ApplicationCapacityDescription - :param managed_application_identity: Managed application identity description. + :param parameter_list: List of application parameters with overridden + values from their default values specified in the application manifest. + :type parameter_list: + list[~azure.servicefabric.models.ApplicationParameter] + :param application_capacity: Describes capacity information for services + of this application. This description can be used for describing the + following. 
+ - Reserving the capacity for the services on the nodes + - Limiting the total number of nodes that services of this application can + run on + - Limiting the custom capacity metrics to limit the total consumption of + this metric by the services of this application + :type application_capacity: + ~azure.servicefabric.models.ApplicationCapacityDescription + :param managed_application_identity: Managed application identity + description. :type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -1023,17 +820,7 @@ class ApplicationDescription(msrest.serialization.Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__( - self, - *, - name: str, - type_name: str, - type_version: str, - parameter_list: Optional[List["ApplicationParameter"]] = None, - application_capacity: Optional["ApplicationCapacityDescription"] = None, - managed_application_identity: Optional["ManagedApplicationIdentityDescription"] = None, - **kwargs - ): + def __init__(self, *, name: str, type_name: str, type_version: str, parameter_list=None, application_capacity=None, managed_application_identity=None, **kwargs) -> None: super(ApplicationDescription, self).__init__(**kwargs) self.name = name self.type_name = type_name @@ -1043,23 +830,26 @@ def __init__( self.managed_application_identity = managed_application_identity -class EntityHealth(msrest.serialization.Model): - """Health information common to all entities in the cluster. It contains the aggregated health state, health events and unhealthy evaluation. +class EntityHealth(Model): + """Health information common to all entities in the cluster. It contains the + aggregated health state, health events and unhealthy evaluation. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. 
:type health_statistics: ~azure.servicefabric.models.HealthStatistics """ @@ -1070,15 +860,7 @@ class EntityHealth(msrest.serialization.Model): 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, **kwargs) -> None: super(EntityHealth, self).__init__(**kwargs) self.aggregated_health_state = aggregated_health_state self.health_events = health_events @@ -1087,29 +869,36 @@ def __init__( class ApplicationHealth(EntityHealth): - """Represents the health of the application. Contains the application aggregated health state and the service and deployed application health states. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Represents the health of the application. Contains the application + aggregated health state and the service and deployed application health + states. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str - :param service_health_states: Service health states as found in the health store. - :type service_health_states: list[~azure.servicefabric.models.ServiceHealthState] - :param deployed_application_health_states: Deployed application health states as found in the - health store. + :param service_health_states: Service health states as found in the health + store. + :type service_health_states: + list[~azure.servicefabric.models.ServiceHealthState] + :param deployed_application_health_states: Deployed application health + states as found in the health store. 
:type deployed_application_health_states: list[~azure.servicefabric.models.DeployedApplicationHealthState] """ @@ -1124,49 +913,44 @@ class ApplicationHealth(EntityHealth): 'deployed_application_health_states': {'key': 'DeployedApplicationHealthStates', 'type': '[DeployedApplicationHealthState]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - name: Optional[str] = None, - service_health_states: Optional[List["ServiceHealthState"]] = None, - deployed_application_health_states: Optional[List["DeployedApplicationHealthState"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, service_health_states=None, deployed_application_health_states=None, **kwargs) -> None: super(ApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name self.service_health_states = service_health_states self.deployed_application_health_states = deployed_application_health_states -class HealthEvaluation(msrest.serialization.Model): - """Represents a health evaluation which describes the data and the algorithm used by health manager to evaluate the health of an entity. +class HealthEvaluation(Model): + """Represents a health evaluation which describes the data and the algorithm + used by health manager to evaluate the health of an entity. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ApplicationHealthEvaluation, ApplicationTypeApplicationsHealthEvaluation, ApplicationsHealthEvaluation, DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, DeployedApplicationsHealthEvaluation, DeployedServicePackageHealthEvaluation, DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, NodeHealthEvaluation, NodeTypeNodesHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, PartitionsHealthEvaluation, ReplicaHealthEvaluation, ReplicasHealthEvaluation, ServiceHealthEvaluation, ServicesHealthEvaluation, SystemApplicationHealthEvaluation, UpgradeDomainDeltaNodesCheckHealthEvaluation, UpgradeDomainNodesHealthEvaluation. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + sub-classes are: ApplicationHealthEvaluation, ApplicationsHealthEvaluation, + ApplicationTypeApplicationsHealthEvaluation, + DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, + DeployedApplicationsHealthEvaluation, + DeployedServicePackageHealthEvaluation, + DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, + NodeHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, + PartitionsHealthEvaluation, ReplicaHealthEvaluation, + ReplicasHealthEvaluation, ServiceHealthEvaluation, + ServicesHealthEvaluation, SystemApplicationHealthEvaluation, + UpgradeDomainDeltaNodesCheckHealthEvaluation, + UpgradeDomainNodesHealthEvaluation, NodeTypeNodesHealthEvaluation + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -1174,56 +958,49 @@ class HealthEvaluation(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'Application': 'ApplicationHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'NodeTypeNodes': 'NodeTypeNodesHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} + 'kind': {'Application': 'ApplicationHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 
'Node': 'NodeHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation', 'NodeTypeNodes': 'NodeTypeNodesHealthEvaluation'} } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, **kwargs) -> None: super(HealthEvaluation, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.aggregated_health_state = aggregated_health_state self.description = description + self.kind = None class ApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for an application, containing information about the data and the algorithm used by the health store to evaluate health. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for an application, containing information + about the data and the algorithm used by the health store to evaluate + health. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
:type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the application. The types of the unhealthy evaluations can be - DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the application. The types of the + unhealthy evaluations can be DeployedApplicationsHealthEvaluation, + ServicesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -1231,33 +1008,27 @@ class ApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - application_name: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, application_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(ApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Application' # type: str self.application_name = application_name self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Application' -class 
ApplicationHealthPolicies(msrest.serialization.Model): - """Defines the application health policy map used to evaluate the health of an application or one of its children entities. +class ApplicationHealthPolicies(Model): + """Defines the application health policy map used to evaluate the health of an + application or one of its children entities. - :param application_health_policy_map: The wrapper that contains the map with application health - policies used to evaluate specific applications in the cluster. + :param application_health_policy_map: The wrapper that contains the map + with application health policies used to evaluate specific applications in + the cluster. :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] """ @@ -1266,36 +1037,36 @@ class ApplicationHealthPolicies(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__( - self, - *, - application_health_policy_map: Optional[List["ApplicationHealthPolicyMapItem"]] = None, - **kwargs - ): + def __init__(self, *, application_health_policy_map=None, **kwargs) -> None: super(ApplicationHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = application_health_policy_map -class ApplicationHealthPolicy(msrest.serialization.Model): - """Defines a health policy used to evaluate the health of an application or one of its children entities. +class ApplicationHealthPolicy(Model): + """Defines a health policy used to evaluate the health of an application or + one of its children entities. - :param consider_warning_as_error: Indicates whether warnings are treated with the same severity - as errors. + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. Default value: False . 
:type consider_warning_as_error: bool - :param max_percent_unhealthy_deployed_applications: The maximum allowed percentage of unhealthy - deployed applications. Allowed values are Byte values from zero to 100. - The percentage represents the maximum tolerated percentage of deployed applications that can - be unhealthy before the application is considered in error. - This is calculated by dividing the number of unhealthy deployed applications over the number - of nodes where the application is currently deployed on in the cluster. - The computation rounds up to tolerate one failure on small numbers of nodes. Default - percentage is zero. + :param max_percent_unhealthy_deployed_applications: The maximum allowed + percentage of unhealthy deployed applications. Allowed values are Byte + values from zero to 100. + The percentage represents the maximum tolerated percentage of deployed + applications that can be unhealthy before the application is considered in + error. + This is calculated by dividing the number of unhealthy deployed + applications over the number of nodes where the application is currently + deployed on in the cluster. + The computation rounds up to tolerate one failure on small numbers of + nodes. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_deployed_applications: int - :param default_service_type_health_policy: The health policy used by default to evaluate the - health of a service type. - :type default_service_type_health_policy: ~azure.servicefabric.models.ServiceTypeHealthPolicy - :param service_type_health_policy_map: The map with service type health policy per service type - name. The map is empty by default. + :param default_service_type_health_policy: The health policy used by + default to evaluate the health of a service type. 
+ :type default_service_type_health_policy: + ~azure.servicefabric.models.ServiceTypeHealthPolicy + :param service_type_health_policy_map: The map with service type health + policy per service type name. The map is empty by default. :type service_type_health_policy_map: list[~azure.servicefabric.models.ServiceTypeHealthPolicyMapItem] """ @@ -1307,15 +1078,7 @@ class ApplicationHealthPolicy(msrest.serialization.Model): 'service_type_health_policy_map': {'key': 'ServiceTypeHealthPolicyMap', 'type': '[ServiceTypeHealthPolicyMapItem]'}, } - def __init__( - self, - *, - consider_warning_as_error: Optional[bool] = False, - max_percent_unhealthy_deployed_applications: Optional[int] = 0, - default_service_type_health_policy: Optional["ServiceTypeHealthPolicy"] = None, - service_type_health_policy_map: Optional[List["ServiceTypeHealthPolicyMapItem"]] = None, - **kwargs - ): + def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealthy_deployed_applications: int=0, default_service_type_health_policy=None, service_type_health_policy_map=None, **kwargs) -> None: super(ApplicationHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = consider_warning_as_error self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications @@ -1323,16 +1086,16 @@ def __init__( self.service_type_health_policy_map = service_type_health_policy_map -class ApplicationHealthPolicyMapItem(msrest.serialization.Model): +class ApplicationHealthPolicyMapItem(Model): """Defines an item in ApplicationHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application health policy map item. This is the name of - the application. + :param key: Required. The key of the application health policy map item. + This is the name of the application. :type key: str - :param value: Required. The value of the application health policy map item. 
This is the - ApplicationHealthPolicy for this application. + :param value: Required. The value of the application health policy map + item. This is the ApplicationHealthPolicy for this application. :type value: ~azure.servicefabric.models.ApplicationHealthPolicy """ @@ -1346,28 +1109,24 @@ class ApplicationHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'ApplicationHealthPolicy'}, } - def __init__( - self, - *, - key: str, - value: "ApplicationHealthPolicy", - **kwargs - ): + def __init__(self, *, key: str, value, **kwargs) -> None: super(ApplicationHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value -class ApplicationHealthPolicyMapObject(msrest.serialization.Model): - """Represents the map of application health policies for a ServiceFabric cluster upgrade. +class ApplicationHealthPolicyMapObject(Model): + """Represents the map of application health policies for a ServiceFabric + cluster upgrade. - :param application_health_policy_map: Defines a map that contains specific application health - policies for different applications. - Each entry specifies as key the application name and as value an ApplicationHealthPolicy used - to evaluate the application health. - If an application is not specified in the map, the application health evaluation uses the - ApplicationHealthPolicy found in its application manifest or the default application health - policy (if no health policy is defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific + application health policies for different applications. + Each entry specifies as key the application name and as value an + ApplicationHealthPolicy used to evaluate the application health. 
+ If an application is not specified in the map, the application health + evaluation uses the ApplicationHealthPolicy found in its application + manifest or the default application health policy (if no health policy is + defined in the manifest). The map is empty by default. :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] @@ -1377,12 +1136,7 @@ class ApplicationHealthPolicyMapObject(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__( - self, - *, - application_health_policy_map: Optional[List["ApplicationHealthPolicyMapItem"]] = None, - **kwargs - ): + def __init__(self, *, application_health_policy_map=None, **kwargs) -> None: super(ApplicationHealthPolicyMapObject, self).__init__(**kwargs) self.application_health_policy_map = application_health_policy_map @@ -1392,44 +1146,25 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -1445,16 +1180,17 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -1468,11 +1204,11 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -1485,27 +1221,8 @@ class ApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationHealthReportExpiredEvent, 
self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationHealthReportExpired' # type: str self.application_instance_id = application_instance_id self.source_id = source_id self.property = property @@ -1515,39 +1232,42 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ApplicationHealthReportExpired' -class EntityHealthState(msrest.serialization.Model): - """A base type for the health state of various entities in the cluster. It contains the aggregated health state. +class EntityHealthState(Model): + """A base type for the health state of various entities in the cluster. It + contains the aggregated health state. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState """ _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, **kwargs) -> None: super(EntityHealthState, self).__init__(**kwargs) self.aggregated_health_state = aggregated_health_state class ApplicationHealthState(EntityHealthState): - """Represents the health state of an application, which contains the application identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param name: The name of the application, including the 'fabric:' URI scheme. + """Represents the health state of an application, which contains the + application identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param name: The name of the application, including the 'fabric:' URI + scheme. 
:type name: str """ @@ -1556,23 +1276,18 @@ class ApplicationHealthState(EntityHealthState): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, name: str=None, **kwargs) -> None: super(ApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.name = name -class EntityHealthStateChunk(msrest.serialization.Model): - """A base type for the health state chunk of various entities in the cluster. It contains the aggregated health state. +class EntityHealthStateChunk(Model): + """A base type for the health state chunk of various entities in the cluster. + It contains the aggregated health state. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -1580,33 +1295,35 @@ class EntityHealthStateChunk(msrest.serialization.Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - **kwargs - ): + def __init__(self, *, health_state=None, **kwargs) -> None: super(EntityHealthStateChunk, self).__init__(**kwargs) self.health_state = health_state class ApplicationHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a application. 
-The application health state chunk contains the application name, its aggregated health state and any children services and deployed applications that respect the filters in cluster health chunk query description. + The application health state chunk contains the application name, its + aggregated health state and any children services and deployed applications + that respect the filters in cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param application_type_name: The application type name as defined in the application manifest. + :param application_type_name: The application type name as defined in the + application manifest. :type application_type_name: str - :param service_health_state_chunks: The list of service health state chunks in the cluster that - respect the filters in the cluster health chunk query description. - :type service_health_state_chunks: ~azure.servicefabric.models.ServiceHealthStateChunkList - :param deployed_application_health_state_chunks: The list of deployed application health state - chunks in the cluster that respect the filters in the cluster health chunk query description. + :param service_health_state_chunks: The list of service health state + chunks in the cluster that respect the filters in the cluster health chunk + query description. 
+ :type service_health_state_chunks: + ~azure.servicefabric.models.ServiceHealthStateChunkList + :param deployed_application_health_state_chunks: The list of deployed + application health state chunks in the cluster that respect the filters in + the cluster health chunk query description. :type deployed_application_health_state_chunks: ~azure.servicefabric.models.DeployedApplicationHealthStateChunkList """ @@ -1619,16 +1336,7 @@ class ApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_application_health_state_chunks': {'key': 'DeployedApplicationHealthStateChunks', 'type': 'DeployedApplicationHealthStateChunkList'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - application_name: Optional[str] = None, - application_type_name: Optional[str] = None, - service_health_state_chunks: Optional["ServiceHealthStateChunkList"] = None, - deployed_application_health_state_chunks: Optional["DeployedApplicationHealthStateChunkList"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, application_name: str=None, application_type_name: str=None, service_health_state_chunks=None, deployed_application_health_state_chunks=None, **kwargs) -> None: super(ApplicationHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.application_name = application_name self.application_type_name = application_type_name @@ -1636,11 +1344,12 @@ def __init__( self.deployed_application_health_state_chunks = deployed_application_health_state_chunks -class EntityHealthStateChunkList(msrest.serialization.Model): - """A base type for the list of health state chunks found in the cluster. It contains the total number of health states that match the input filters. +class EntityHealthStateChunkList(Model): + """A base type for the list of health state chunks found in the cluster. It + contains the total number of health states that match the input filters. 
- :param total_count: Total number of entity health state objects that match the specified - filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. :type total_count: long """ @@ -1648,24 +1357,21 @@ class EntityHealthStateChunkList(msrest.serialization.Model): 'total_count': {'key': 'TotalCount', 'type': 'long'}, } - def __init__( - self, - *, - total_count: Optional[int] = None, - **kwargs - ): + def __init__(self, *, total_count: int=None, **kwargs) -> None: super(EntityHealthStateChunkList, self).__init__(**kwargs) self.total_count = total_count class ApplicationHealthStateChunkList(EntityHealthStateChunkList): - """The list of application health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. + """The list of application health state chunks in the cluster that respect the + input filters in the chunk query. Returned by get cluster health state + chunks query. - :param total_count: Total number of entity health state objects that match the specified - filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. :type total_count: long - :param items: The list of application health state chunks that respect the input filters in the - chunk query. + :param items: The list of application health state chunks that respect the + input filters in the chunk query. 
:type items: list[~azure.servicefabric.models.ApplicationHealthStateChunk] """ @@ -1674,81 +1380,87 @@ class ApplicationHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[ApplicationHealthStateChunk]'}, } - def __init__( - self, - *, - total_count: Optional[int] = None, - items: Optional[List["ApplicationHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, total_count: int=None, items=None, **kwargs) -> None: super(ApplicationHealthStateChunkList, self).__init__(total_count=total_count, **kwargs) self.items = items -class ApplicationHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a application should be included in the cluster health chunk. -One filter can match zero, one or multiple applications, depending on its properties. +class ApplicationHealthStateFilter(Model): + """Defines matching criteria to determine whether a application should be + included in the cluster health chunk. + One filter can match zero, one or multiple applications, depending on its + properties. - :param application_name_filter: The name of the application that matches the filter, as a - fabric uri. The filter is applied only to the specified application, if it exists. - If the application doesn't exist, no application is returned in the cluster health chunk based - on this filter. - If the application exists, it is included in the cluster health chunk if it respects the other - filter properties. - If not specified, all applications are matched against the other filter members, like health - state filter. - :type application_name_filter: str - :param application_type_name_filter: The name of the application type that matches the filter. - If specified, the filter is applied only to applications of the selected application type, if - any exists. 
- If no applications of the specified application type exists, no application is returned in the + :param application_name_filter: The name of the application that matches + the filter, as a fabric uri. The filter is applied only to the specified + application, if it exists. + If the application doesn't exist, no application is returned in the cluster health chunk based on this filter. - Each application of the specified application type is included in the cluster health chunk if + If the application exists, it is included in the cluster health chunk if it respects the other filter properties. - If not specified, all applications are matched against the other filter members, like health - state filter. + If not specified, all applications are matched against the other filter + members, like health state filter. + :type application_name_filter: str + :param application_type_name_filter: The name of the application type that + matches the filter. + If specified, the filter is applied only to applications of the selected + application type, if any exists. + If no applications of the specified application type exists, no + application is returned in the cluster health chunk based on this filter. + Each application of the specified application type is included in the + cluster health chunk if it respects the other filter properties. + If not specified, all applications are matched against the other filter + members, like health state filter. :type application_type_name_filter: str - :param health_state_filter: The filter for the health state of the applications. It allows - selecting applications if they match the desired health states. - The possible values are integer value of one of the following health states. Only applications - that match the filter are returned. All applications are used to evaluate the cluster - aggregated health state. - If not specified, default value is None, unless the application name or the application type - name are specified. 
If the filter has default value and application name is specified, the - matching application is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches applications with HealthState value of OK - (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + applications. It allows selecting applications if they match the desired + health states. + The possible values are integer value of one of the following health + states. Only applications that match the filter are returned. All + applications are used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the application name or + the application type name are specified. If the filter has default value + and application name is specified, the matching application is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches applications with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. 
+ - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param service_filters: Defines a list of filters that specify which services to be included in - the returned cluster health chunk as children of the application. The services are returned - only if the parent application matches a filter. - If the list is empty, no services are returned. All the services are used to evaluate the - parent application aggregated health state, regardless of the input filters. + :param service_filters: Defines a list of filters that specify which + services to be included in the returned cluster health chunk as children + of the application. The services are returned only if the parent + application matches a filter. + If the list is empty, no services are returned. All the services are used + to evaluate the parent application aggregated health state, regardless of + the input filters. The application filter may specify multiple service filters. - For example, it can specify a filter to return all services with health state Error and - another filter to always include a service identified by its service name. - :type service_filters: list[~azure.servicefabric.models.ServiceHealthStateFilter] - :param deployed_application_filters: Defines a list of filters that specify which deployed - applications to be included in the returned cluster health chunk as children of the - application. The deployed applications are returned only if the parent application matches a - filter. - If the list is empty, no deployed applications are returned. 
All the deployed applications are - used to evaluate the parent application aggregated health state, regardless of the input - filters. + For example, it can specify a filter to return all services with health + state Error and another filter to always include a service identified by + its service name. + :type service_filters: + list[~azure.servicefabric.models.ServiceHealthStateFilter] + :param deployed_application_filters: Defines a list of filters that + specify which deployed applications to be included in the returned cluster + health chunk as children of the application. The deployed applications are + returned only if the parent application matches a filter. + If the list is empty, no deployed applications are returned. All the + deployed applications are used to evaluate the parent application + aggregated health state, regardless of the input filters. The application filter may specify multiple deployed application filters. - For example, it can specify a filter to return all deployed applications with health state - Error and another filter to always include a deployed application on a specified node. + For example, it can specify a filter to return all deployed applications + with health state Error and another filter to always include a deployed + application on a specified node. 
:type deployed_application_filters: list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter] """ @@ -1761,16 +1473,7 @@ class ApplicationHealthStateFilter(msrest.serialization.Model): 'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'}, } - def __init__( - self, - *, - application_name_filter: Optional[str] = None, - application_type_name_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - service_filters: Optional[List["ServiceHealthStateFilter"]] = None, - deployed_application_filters: Optional[List["DeployedApplicationHealthStateFilter"]] = None, - **kwargs - ): + def __init__(self, *, application_name_filter: str=None, application_type_name_filter: str=None, health_state_filter: int=0, service_filters=None, deployed_application_filters=None, **kwargs) -> None: super(ApplicationHealthStateFilter, self).__init__(**kwargs) self.application_name_filter = application_name_filter self.application_type_name_filter = application_type_name_filter @@ -1779,36 +1482,43 @@ def __init__( self.deployed_application_filters = deployed_application_filters -class ApplicationInfo(msrest.serialization.Model): +class ApplicationInfo(Model): """Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str - :param type_name: The application type name as defined in the application manifest. - :type type_name: str - :param type_version: The version of the application type as defined in the application + :param type_name: The application type name as defined in the application manifest. + :type type_name: str + :param type_version: The version of the application type as defined in the + application manifest. :type type_version: str - :param status: The status of the application. Possible values include: "Invalid", "Ready", - "Upgrading", "Creating", "Deleting", "Failed". + :param status: The status of the application. Possible values include: + 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :type status: str or ~azure.servicefabric.models.ApplicationStatus - :param parameters: List of application parameters with overridden values from their default - values specified in the application manifest. + :param parameters: List of application parameters with overridden values + from their default values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param application_definition_kind: The mechanism used to define a Service Fabric application. - Possible values include: "Invalid", "ServiceFabricApplicationDescription", "Compose". - :type application_definition_kind: str or ~azure.servicefabric.models.ApplicationDefinitionKind - :param managed_application_identity: Managed application identity description. + :param application_definition_kind: The mechanism used to define a Service + Fabric application. Possible values include: 'Invalid', + 'ServiceFabricApplicationDescription', 'Compose' + :type application_definition_kind: str or + ~azure.servicefabric.models.ApplicationDefinitionKind + :param managed_application_identity: Managed application identity + description. :type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -1825,20 +1535,7 @@ class ApplicationInfo(msrest.serialization.Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - type_name: Optional[str] = None, - type_version: Optional[str] = None, - status: Optional[Union[str, "ApplicationStatus"]] = None, - parameters: Optional[List["ApplicationParameter"]] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - application_definition_kind: Optional[Union[str, "ApplicationDefinitionKind"]] = None, - managed_application_identity: Optional["ManagedApplicationIdentityDescription"] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, type_version: str=None, status=None, parameters=None, health_state=None, application_definition_kind=None, managed_application_identity=None, **kwargs) -> None: super(ApplicationInfo, self).__init__(**kwargs) 
self.id = id self.name = name @@ -1851,28 +1548,37 @@ def __init__( self.managed_application_identity = managed_application_identity -class ApplicationLoadInfo(msrest.serialization.Model): +class ApplicationLoadInfo(Model): """Load Information about a Service Fabric application. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str :param minimum_nodes: The minimum number of nodes for this application. - It is the number of nodes where Service Fabric will reserve Capacity in the cluster which - equals to ReservedLoad * MinimumNodes for this Application instance. - For applications that do not have application capacity defined this value will be zero. + It is the number of nodes where Service Fabric will reserve Capacity in + the cluster which equals to ReservedLoad * MinimumNodes for this + Application instance. + For applications that do not have application capacity defined this value + will be zero. :type minimum_nodes: long - :param maximum_nodes: The maximum number of nodes where this application can be instantiated. + :param maximum_nodes: The maximum number of nodes where this application + can be instantiated. 
It is the number of nodes this application is allowed to span. - For applications that do not have application capacity defined this value will be zero. + For applications that do not have application capacity defined this value + will be zero. :type maximum_nodes: long - :param node_count: The number of nodes on which this application is instantiated. - For applications that do not have application capacity defined this value will be zero. + :param node_count: The number of nodes on which this application is + instantiated. + For applications that do not have application capacity defined this value + will be zero. :type node_count: long - :param application_load_metric_information: List of application load metric information. + :param application_load_metric_information: List of application load + metric information. :type application_load_metric_information: list[~azure.servicefabric.models.ApplicationLoadMetricInformation] """ @@ -1885,16 +1591,7 @@ class ApplicationLoadInfo(msrest.serialization.Model): 'application_load_metric_information': {'key': 'ApplicationLoadMetricInformation', 'type': '[ApplicationLoadMetricInformation]'}, } - def __init__( - self, - *, - id: Optional[str] = None, - minimum_nodes: Optional[int] = None, - maximum_nodes: Optional[int] = None, - node_count: Optional[int] = None, - application_load_metric_information: Optional[List["ApplicationLoadMetricInformation"]] = None, - **kwargs - ): + def __init__(self, *, id: str=None, minimum_nodes: int=None, maximum_nodes: int=None, node_count: int=None, application_load_metric_information=None, **kwargs) -> None: super(ApplicationLoadInfo, self).__init__(**kwargs) self.id = id self.minimum_nodes = minimum_nodes @@ -1903,20 +1600,26 @@ def __init__( self.application_load_metric_information = application_load_metric_information -class ApplicationLoadMetricInformation(msrest.serialization.Model): - """Describes load information for a custom resource balancing metric. 
This can be used to limit the total consumption of this metric by the services of this application. +class ApplicationLoadMetricInformation(Model): + """Describes load information for a custom resource balancing metric. This can + be used to limit the total consumption of this metric by the services of + this application. :param name: The name of the metric. :type name: str - :param reservation_capacity: This is the capacity reserved in the cluster for the application. + :param reservation_capacity: This is the capacity reserved in the cluster + for the application. It's the product of NodeReservationCapacity and MinimumNodes. If set to zero, no capacity is reserved for this metric. - When setting application capacity or when updating application capacity this value must be - smaller than or equal to MaximumCapacity for each metric. + When setting application capacity or when updating application capacity + this value must be smaller than or equal to MaximumCapacity for each + metric. :type reservation_capacity: long - :param application_capacity: Total capacity for this metric in this application instance. + :param application_capacity: Total capacity for this metric in this + application instance. :type application_capacity: long - :param application_load: Current load for this metric in this application instance. + :param application_load: Current load for this metric in this application + instance. 
:type application_load: long """ @@ -1927,15 +1630,7 @@ class ApplicationLoadMetricInformation(msrest.serialization.Model): 'application_load': {'key': 'ApplicationLoad', 'type': 'long'}, } - def __init__( - self, - *, - name: Optional[str] = None, - reservation_capacity: Optional[int] = None, - application_capacity: Optional[int] = None, - application_load: Optional[int] = None, - **kwargs - ): + def __init__(self, *, name: str=None, reservation_capacity: int=None, application_capacity: int=None, application_load: int=None, **kwargs) -> None: super(ApplicationLoadMetricInformation, self).__init__(**kwargs) self.name = name self.reservation_capacity = reservation_capacity @@ -1943,35 +1638,46 @@ def __init__( self.application_load = application_load -class ApplicationMetricDescription(msrest.serialization.Model): - """Describes capacity information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. +class ApplicationMetricDescription(Model): + """Describes capacity information for a custom resource balancing metric. This + can be used to limit the total consumption of this metric by the services + of this application. :param name: The name of the metric. :type name: str - :param maximum_capacity: The maximum node capacity for Service Fabric application. - This is the maximum Load for an instance of this application on a single node. Even if the - capacity of node is greater than this value, Service Fabric will limit the total load of - services within the application on each node to this value. + :param maximum_capacity: The maximum node capacity for Service Fabric + application. + This is the maximum Load for an instance of this application on a single + node. Even if the capacity of node is greater than this value, Service + Fabric will limit the total load of services within the application on + each node to this value. 
If set to zero, capacity for this metric is unlimited on each node. - When creating a new application with application capacity defined, the product of MaximumNodes - and this value must always be smaller than or equal to TotalApplicationCapacity. - When updating existing application with application capacity, the product of MaximumNodes and - this value must always be smaller than or equal to TotalApplicationCapacity. + When creating a new application with application capacity defined, the + product of MaximumNodes and this value must always be smaller than or + equal to TotalApplicationCapacity. + When updating existing application with application capacity, the product + of MaximumNodes and this value must always be smaller than or equal to + TotalApplicationCapacity. :type maximum_capacity: long - :param reservation_capacity: The node reservation capacity for Service Fabric application. - This is the amount of load which is reserved on nodes which have instances of this - application. - If MinimumNodes is specified, then the product of these values will be the capacity reserved - in the cluster for the application. + :param reservation_capacity: The node reservation capacity for Service + Fabric application. + This is the amount of load which is reserved on nodes which have instances + of this application. + If MinimumNodes is specified, then the product of these values will be the + capacity reserved in the cluster for the application. If set to zero, no capacity is reserved for this metric. - When setting application capacity or when updating application capacity; this value must be - smaller than or equal to MaximumCapacity for each metric. + When setting application capacity or when updating application capacity; + this value must be smaller than or equal to MaximumCapacity for each + metric. :type reservation_capacity: long - :param total_application_capacity: The total metric capacity for Service Fabric application. 
- This is the total metric capacity for this application in the cluster. Service Fabric will try - to limit the sum of loads of services within the application to this value. - When creating a new application with application capacity defined, the product of MaximumNodes - and MaximumCapacity must always be smaller than or equal to this value. + :param total_application_capacity: The total metric capacity for Service + Fabric application. + This is the total metric capacity for this application in the cluster. + Service Fabric will try to limit the sum of loads of services within the + application to this value. + When creating a new application with application capacity defined, the + product of MaximumNodes and MaximumCapacity must always be smaller than or + equal to this value. :type total_application_capacity: long """ @@ -1982,15 +1688,7 @@ class ApplicationMetricDescription(msrest.serialization.Model): 'total_application_capacity': {'key': 'TotalApplicationCapacity', 'type': 'long'}, } - def __init__( - self, - *, - name: Optional[str] = None, - maximum_capacity: Optional[int] = None, - reservation_capacity: Optional[int] = None, - total_application_capacity: Optional[int] = None, - **kwargs - ): + def __init__(self, *, name: str=None, maximum_capacity: int=None, reservation_capacity: int=None, total_application_capacity: int=None, **kwargs) -> None: super(ApplicationMetricDescription, self).__init__(**kwargs) self.name = name self.maximum_capacity = maximum_capacity @@ -1998,16 +1696,19 @@ def __init__( self.total_application_capacity = total_application_capacity -class ApplicationNameInfo(msrest.serialization.Model): +class ApplicationNameInfo(Model): """Information about the application name. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str """ @@ -2016,13 +1717,7 @@ class ApplicationNameInfo(msrest.serialization.Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: super(ApplicationNameInfo, self).__init__(**kwargs) self.id = id self.name = name @@ -2033,44 +1728,25 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -2086,16 +1762,17 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -2109,11 +1786,11 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -2126,27 +1803,8 @@ class ApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationNewHealthReportEvent, 
self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationNewHealthReport' # type: str self.application_instance_id = application_instance_id self.source_id = source_id self.property = property @@ -2156,10 +1814,12 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ApplicationNewHealthReport' -class ApplicationParameter(msrest.serialization.Model): - """Describes an application parameter override to be applied when creating or upgrading an application. +class ApplicationParameter(Model): + """Describes an application parameter override to be applied when creating or + upgrading an application. All required parameters must be populated in order to send to Azure. @@ -2179,13 +1839,7 @@ class ApplicationParameter(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__( - self, - *, - key: str, - value: str, - **kwargs - ): + def __init__(self, *, key: str, value: str, **kwargs) -> None: super(ApplicationParameter, self).__init__(**kwargs) self.key = key self.value = value @@ -2196,50 +1850,32 @@ class ApplicationProcessExitedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_name: Required. Name of Service. :type service_name: str :param service_package_name: Required. Name of Service package. :type service_package_name: str - :param service_package_activation_id: Required. Activation Id of Service package. + :param service_package_activation_id: Required. Activation Id of Service + package. :type service_package_activation_id: str :param is_exclusive: Required. Indicates IsExclusive flag. 
:type is_exclusive: bool @@ -2255,16 +1891,17 @@ class ApplicationProcessExitedEvent(ApplicationEvent): :type host_id: str :param exit_code: Required. Exit code of process. :type exit_code: long - :param unexpected_termination: Required. Indicates if termination is unexpected. + :param unexpected_termination: Required. Indicates if termination is + unexpected. :type unexpected_termination: bool :param start_time: Required. Start time of process. - :type start_time: ~datetime.datetime + :type start_time: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_name': {'required': True}, 'service_package_name': {'required': True}, @@ -2281,11 +1918,11 @@ class ApplicationProcessExitedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, @@ -2301,30 +1938,8 @@ class ApplicationProcessExitedEvent(ApplicationEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - service_name: str, - service_package_name: str, - service_package_activation_id: str, - is_exclusive: bool, - code_package_name: str, - entry_point_type: str, - exe_name: str, - process_id: int, - host_id: str, - exit_code: int, - unexpected_termination: bool, - start_time: datetime.datetime, - category: Optional[str] = None, - 
has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_name: str, service_package_name: str, service_package_activation_id: str, is_exclusive: bool, code_package_name: str, entry_point_type: str, exe_name: str, process_id: int, host_id: str, exit_code: int, unexpected_termination: bool, start_time, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationProcessExitedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationProcessExited' # type: str self.service_name = service_name self.service_package_name = service_package_name self.service_package_activation_id = service_package_activation_id @@ -2337,44 +1952,50 @@ def __init__( self.exit_code = exit_code self.unexpected_termination = unexpected_termination self.start_time = start_time + self.kind = 'ApplicationProcessExited' -class ApplicationResourceDescription(msrest.serialization.Model): +class ApplicationResourceDescription(Model): """This type describes a application resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the Application resource. :type name: str - :param identity: Describes the identity of the application. - :type identity: ~azure.servicefabric.models.IdentityDescription :param description: User readable description of the application. :type description: str - :param services: Describes the services in the application. This property is used to create or - modify services of the application. On get only the name of the service is returned. 
The - service description can be obtained by querying for the service resource. - :type services: list[~azure.servicefabric.models.ServiceResourceDescription] - :param diagnostics: Describes the diagnostics definition and usage for an application resource. + :param services: Describes the services in the application. This property + is used to create or modify services of the application. On get only the + name of the service is returned. The service description can be obtained + by querying for the service resource. + :type services: + list[~azure.servicefabric.models.ServiceResourceDescription] + :param diagnostics: Describes the diagnostics definition and usage for an + application resource. :type diagnostics: ~azure.servicefabric.models.DiagnosticsDescription - :param debug_params: Internal - used by Visual Studio to setup the debugging session on the - local development environment. + :param debug_params: Internal - used by Visual Studio to setup the + debugging session on the local development environment. :type debug_params: str :ivar service_names: Names of the services in the application. :vartype service_names: list[str] - :ivar status: Status of the application. Possible values include: "Unknown", "Ready", - "Upgrading", "Creating", "Deleting", "Failed". + :ivar status: Status of the application. Possible values include: + 'Unknown', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the application. + :ivar status_details: Gives additional information about the current + status of the application. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :ivar health_state: Describes the health state of an application resource. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the application's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the application is marked - unhealthy. + :ivar unhealthy_evaluation: When the application's health state is not + 'Ok', this additional details from service fabric Health Manager for the + user to know why the application is marked unhealthy. :vartype unhealthy_evaluation: str + :param identity: Describes the identity of the application. + :type identity: ~azure.servicefabric.models.IdentityDescription """ _validation = { @@ -2388,7 +2009,6 @@ class ApplicationResourceDescription(msrest.serialization.Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, - 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'services': {'key': 'properties.services', 'type': '[ServiceResourceDescription]'}, 'diagnostics': {'key': 'properties.diagnostics', 'type': 'DiagnosticsDescription'}, @@ -2398,22 +2018,12 @@ class ApplicationResourceDescription(msrest.serialization.Model): 'status_details': {'key': 'properties.statusDetails', 'type': 'str'}, 'health_state': {'key': 'properties.healthState', 'type': 'str'}, 'unhealthy_evaluation': {'key': 'properties.unhealthyEvaluation', 'type': 'str'}, + 'identity': {'key': 'identity', 'type': 'IdentityDescription'}, } - def __init__( - self, - *, - name: str, - identity: Optional["IdentityDescription"] = None, - description: Optional[str] = None, - services: Optional[List["ServiceResourceDescription"]] = None, - diagnostics: Optional["DiagnosticsDescription"] = None, - debug_params: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str, description: str=None, services=None, diagnostics=None, debug_params: str=None, identity=None, 
**kwargs) -> None: super(ApplicationResourceDescription, self).__init__(**kwargs) self.name = name - self.identity = identity self.description = description self.services = services self.diagnostics = diagnostics @@ -2423,45 +2033,56 @@ def __init__( self.status_details = None self.health_state = None self.unhealthy_evaluation = None + self.identity = identity -class ApplicationResourceUpgradeProgressInfo(msrest.serialization.Model): +class ApplicationResourceUpgradeProgressInfo(Model): """This type describes an application resource upgrade. :param name: Name of the Application resource. :type name: str - :param target_application_type_version: The target application version for the application - upgrade. + :param target_application_type_version: The target application version for + the application upgrade. :type target_application_type_version: str - :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. :type start_timestamp_utc: str - :param upgrade_state: The state of the application resource upgrade. Possible values include: - "Invalid", "ProvisioningTarget", "RollingForward", "UnprovisioningCurrent", - "CompletedRollforward", "RollingBack", "UnprovisioningTarget", "CompletedRollback", "Failed". - :type upgrade_state: str or ~azure.servicefabric.models.ApplicationResourceUpgradeState - :param percent_completed: The estimated percent of replicas are completed in the upgrade. + :param upgrade_state: The state of the application resource upgrade. + Possible values include: 'Invalid', 'ProvisioningTarget', + 'RollingForward', 'UnprovisioningCurrent', 'CompletedRollforward', + 'RollingBack', 'UnprovisioningTarget', 'CompletedRollback', 'Failed' + :type upgrade_state: str or + ~azure.servicefabric.models.ApplicationResourceUpgradeState + :param percent_completed: The estimated percent of replicas are completed + in the upgrade. 
:type percent_completed: str :param service_upgrade_progress: List of service upgrade progresses. - :type service_upgrade_progress: list[~azure.servicefabric.models.ServiceUpgradeProgress] - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: "Monitored". - :type rolling_upgrade_mode: str or ~azure.servicefabric.models.RollingUpgradeMode - :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :type service_upgrade_progress: + list[~azure.servicefabric.models.ServiceUpgradeProgress] + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "Monitored" . + :type rolling_upgrade_mode: str or + ~azure.servicefabric.models.RollingUpgradeMode + :param upgrade_duration: The estimated amount of time that the overall + upgrade elapsed. It is first interpreted as a string representing an ISO + 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. Default value: "PT0H2M0S" . :type upgrade_duration: str - :param application_upgrade_status_details: Additional detailed information about the status of - the pending upgrade. + :param application_upgrade_status_details: Additional detailed information + about the status of the pending upgrade. 
:type application_upgrade_status_details: str - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). Default value: 42949672925 . :type upgrade_replica_set_check_timeout_in_seconds: long - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and - FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. 
:type failure_timestamp_utc: str """ @@ -2479,22 +2100,7 @@ class ApplicationResourceUpgradeProgressInfo(msrest.serialization.Model): 'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - target_application_type_version: Optional[str] = None, - start_timestamp_utc: Optional[str] = None, - upgrade_state: Optional[Union[str, "ApplicationResourceUpgradeState"]] = None, - percent_completed: Optional[str] = None, - service_upgrade_progress: Optional[List["ServiceUpgradeProgress"]] = None, - rolling_upgrade_mode: Optional[Union[str, "RollingUpgradeMode"]] = "Monitored", - upgrade_duration: Optional[str] = "PT0H2M0S", - application_upgrade_status_details: Optional[str] = None, - upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, - failure_timestamp_utc: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, target_application_type_version: str=None, start_timestamp_utc: str=None, upgrade_state=None, percent_completed: str=None, service_upgrade_progress=None, rolling_upgrade_mode="Monitored", upgrade_duration: str="PT0H2M0S", application_upgrade_status_details: str=None, upgrade_replica_set_check_timeout_in_seconds: int=42949672925, failure_timestamp_utc: str=None, **kwargs) -> None: super(ApplicationResourceUpgradeProgressInfo, self).__init__(**kwargs) self.name = name self.target_application_type_version = target_application_type_version @@ -2509,17 +2115,18 @@ def __init__( self.failure_timestamp_utc = failure_timestamp_utc -class VolumeReference(msrest.serialization.Model): +class VolumeReference(Model): """Describes a reference to a volume resource. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. Default is 'false'. 
+ :param read_only: The flag indicating whether the volume is read only. + Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which the volume should be - mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which + the volume should be mounted. Only valid path characters are allowed. :type destination_path: str """ @@ -2534,14 +2141,7 @@ class VolumeReference(msrest.serialization.Model): 'destination_path': {'key': 'destinationPath', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - destination_path: str, - read_only: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, name: str, destination_path: str, read_only: bool=None, **kwargs) -> None: super(VolumeReference, self).__init__(**kwargs) self.name = name self.read_only = read_only @@ -2555,13 +2155,14 @@ class ApplicationScopedVolume(VolumeReference): :param name: Required. Name of the volume being referenced. :type name: str - :param read_only: The flag indicating whether the volume is read only. Default is 'false'. + :param read_only: The flag indicating whether the volume is read only. + Default is 'false'. :type read_only: bool - :param destination_path: Required. The path within the container at which the volume should be - mounted. Only valid path characters are allowed. + :param destination_path: Required. The path within the container at which + the volume should be mounted. Only valid path characters are allowed. :type destination_path: str - :param creation_parameters: Required. Describes parameters for creating application-scoped - volumes. + :param creation_parameters: Required. Describes parameters for creating + application-scoped volumes. 
:type creation_parameters: ~azure.servicefabric.models.ApplicationScopedVolumeCreationParameters """ @@ -2579,32 +2180,24 @@ class ApplicationScopedVolume(VolumeReference): 'creation_parameters': {'key': 'creationParameters', 'type': 'ApplicationScopedVolumeCreationParameters'}, } - def __init__( - self, - *, - name: str, - destination_path: str, - creation_parameters: "ApplicationScopedVolumeCreationParameters", - read_only: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, name: str, destination_path: str, creation_parameters, read_only: bool=None, **kwargs) -> None: super(ApplicationScopedVolume, self).__init__(name=name, read_only=read_only, destination_path=destination_path, **kwargs) self.creation_parameters = creation_parameters -class ApplicationScopedVolumeCreationParameters(msrest.serialization.Model): +class ApplicationScopedVolumeCreationParameters(Model): """Describes parameters for creating application-scoped volumes. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk. + sub-classes are: + ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. - Possible values include: "ServiceFabricVolumeDisk". - :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -2612,36 +2205,32 @@ class ApplicationScopedVolumeCreationParameters(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ServiceFabricVolumeDisk': 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk'} } - def __init__( - self, - *, - description: Optional[str] = None, - **kwargs - ): + def __init__(self, *, description: str=None, **kwargs) -> None: super(ApplicationScopedVolumeCreationParameters, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.description = description + self.kind = None class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(ApplicationScopedVolumeCreationParameters): - """Describes parameters for creating application-scoped volumes provided by Service Fabric Volume Disks. + """Describes parameters for creating application-scoped volumes provided by + Service Fabric Volume Disks. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the application-scoped volume kind.Constant filled by server. - Possible values include: "ServiceFabricVolumeDisk". - :type kind: str or ~azure.servicefabric.models.ApplicationScopedVolumeKind :param description: User readable description of the volume. :type description: str - :param size_disk: Required. Volume size. Possible values include: "Small", "Medium", "Large". + :param kind: Required. Constant filled by server. + :type kind: str + :param size_disk: Required. Volume size. 
Possible values include: 'Small', + 'Medium', 'Large' :type size_disk: str or ~azure.servicefabric.models.SizeTypes """ @@ -2651,54 +2240,45 @@ class ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk(Applicati } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, 'size_disk': {'key': 'sizeDisk', 'type': 'str'}, } - def __init__( - self, - *, - size_disk: Union[str, "SizeTypes"], - description: Optional[str] = None, - **kwargs - ): + def __init__(self, *, size_disk, description: str=None, **kwargs) -> None: super(ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk, self).__init__(description=description, **kwargs) - self.kind = 'ServiceFabricVolumeDisk' # type: str self.size_disk = size_disk + self.kind = 'ServiceFabricVolumeDisk' class ApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications, containing health evaluations for each unhealthy application that impacted current aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for applications, containing health + evaluations for each unhealthy application that impacted current aggregated + health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications - from the ClusterHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_applications: Maximum allowed percentage of + unhealthy applications from the ClusterHealthPolicy. :type max_percent_unhealthy_applications: int :param total_count: Total number of applications from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ApplicationHealthEvaluation that impacted the aggregated - health. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ApplicationHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2706,65 +2286,59 @@ class ApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - max_percent_unhealthy_applications: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(ApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Applications' # type: str self.max_percent_unhealthy_applications = max_percent_unhealthy_applications self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Applications' class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for applications of a particular application type. 
The application type applications evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy application of the included application type that impacted current aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for applications of a particular application + type. The application type applications evaluation can be returned when + cluster health evaluation returns unhealthy aggregated health state, either + Error or Warning. 
It contains health evaluations for each unhealthy + application of the included application type that impacted current + aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param application_type_name: The application type name as defined in the application manifest. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_type_name: The application type name as defined in the + application manifest. :type application_type_name: str - :param max_percent_unhealthy_applications: Maximum allowed percentage of unhealthy applications - for the application type, specified as an entry in ApplicationTypeHealthPolicyMap. + :param max_percent_unhealthy_applications: Maximum allowed percentage of + unhealthy applications for the application type, specified as an entry in + ApplicationTypeHealthPolicyMap. :type max_percent_unhealthy_applications: int - :param total_count: Total number of applications of the application type found in the health - store. + :param total_count: Total number of applications of the application type + found in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ApplicationHealthEvaluation of this application type that - impacted the aggregated health. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ApplicationHealthEvaluation of this application type that impacted the + aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -2772,45 +2346,36 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - application_type_name: Optional[str] = None, - max_percent_unhealthy_applications: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, application_type_name: str=None, max_percent_unhealthy_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'ApplicationTypeApplications' # type: str self.application_type_name = application_type_name self.max_percent_unhealthy_applications = 
max_percent_unhealthy_applications self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'ApplicationTypeApplications' -class ApplicationTypeHealthPolicyMapItem(msrest.serialization.Model): +class ApplicationTypeHealthPolicyMapItem(Model): """Defines an item in ApplicationTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the application type health policy map item. This is the name - of the application type. + :param key: Required. The key of the application type health policy map + item. This is the name of the application type. :type key: str - :param value: Required. The value of the application type health policy map item. - The max percent unhealthy applications allowed for the application type. Must be between zero - and 100. + :param value: Required. The value of the application type health policy + map item. + The max percent unhealthy applications allowed for the application type. + Must be between zero and 100. :type value: int """ @@ -2824,25 +2389,20 @@ class ApplicationTypeHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__( - self, - *, - key: str, - value: int, - **kwargs - ): + def __init__(self, *, key: str, value: int, **kwargs) -> None: super(ApplicationTypeHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value -class ApplicationTypeImageStorePath(msrest.serialization.Model): - """Path description for the application package in the image store specified during the prior copy operation. +class ApplicationTypeImageStorePath(Model): + """Path description for the application package in the image store specified + during the prior copy operation. All required parameters must be populated in order to send to Azure. - :param application_type_build_path: Required. The relative image store path to the application - package. 
+ :param application_type_build_path: Required. The relative image store + path to the application package. :type application_type_build_path: str """ @@ -2854,35 +2414,34 @@ class ApplicationTypeImageStorePath(msrest.serialization.Model): 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, } - def __init__( - self, - *, - application_type_build_path: str, - **kwargs - ): + def __init__(self, *, application_type_build_path: str, **kwargs) -> None: super(ApplicationTypeImageStorePath, self).__init__(**kwargs) self.application_type_build_path = application_type_build_path -class ApplicationTypeInfo(msrest.serialization.Model): +class ApplicationTypeInfo(Model): """Information about an application type. - :param name: The application type name as defined in the application manifest. + :param name: The application type name as defined in the application + manifest. :type name: str - :param version: The version of the application type as defined in the application manifest. + :param version: The version of the application type as defined in the + application manifest. :type version: str - :param default_parameter_list: List of application type parameters that can be overridden when - creating or updating the application. - :type default_parameter_list: list[~azure.servicefabric.models.ApplicationParameter] - :param status: The status of the application type. Possible values include: "Invalid", - "Provisioning", "Available", "Unprovisioning", "Failed". + :param default_parameter_list: List of application type parameters that + can be overridden when creating or updating the application. + :type default_parameter_list: + list[~azure.servicefabric.models.ApplicationParameter] + :param status: The status of the application type. 
Possible values + include: 'Invalid', 'Provisioning', 'Available', 'Unprovisioning', + 'Failed' :type status: str or ~azure.servicefabric.models.ApplicationTypeStatus - :param status_details: Additional detailed information about the status of the application - type. + :param status_details: Additional detailed information about the status of + the application type. :type status_details: str - :param application_type_definition_kind: The mechanism used to define a Service Fabric - application type. Possible values include: "Invalid", "ServiceFabricApplicationPackage", - "Compose". + :param application_type_definition_kind: The mechanism used to define a + Service Fabric application type. Possible values include: 'Invalid', + 'ServiceFabricApplicationPackage', 'Compose' :type application_type_definition_kind: str or ~azure.servicefabric.models.ApplicationTypeDefinitionKind """ @@ -2896,17 +2455,7 @@ class ApplicationTypeInfo(msrest.serialization.Model): 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - version: Optional[str] = None, - default_parameter_list: Optional[List["ApplicationParameter"]] = None, - status: Optional[Union[str, "ApplicationTypeStatus"]] = None, - status_details: Optional[str] = None, - application_type_definition_kind: Optional[Union[str, "ApplicationTypeDefinitionKind"]] = None, - **kwargs - ): + def __init__(self, *, name: str=None, version: str=None, default_parameter_list=None, status=None, status_details: str=None, application_type_definition_kind=None, **kwargs) -> None: super(ApplicationTypeInfo, self).__init__(**kwargs) self.name = name self.version = version @@ -2916,8 +2465,9 @@ def __init__( self.application_type_definition_kind = application_type_definition_kind -class ApplicationTypeManifest(msrest.serialization.Model): - """Contains the manifest describing an application type registered in a Service Fabric cluster. 
+class ApplicationTypeManifest(Model): + """Contains the manifest describing an application type registered in a + Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -2927,12 +2477,7 @@ class ApplicationTypeManifest(msrest.serialization.Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__( - self, - *, - manifest: Optional[str] = None, - **kwargs - ): + def __init__(self, *, manifest: str=None, **kwargs) -> None: super(ApplicationTypeManifest, self).__init__(**kwargs) self.manifest = manifest @@ -2942,57 +2487,39 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", 
"StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str :param application_type_version: Required. Application type version. :type application_type_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -3000,87 +2527,96 @@ class ApplicationUpgradeCompletedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - application_type_version: str, - overall_upgrade_elapsed_time_in_ms: float, - category: Optional[str] = None, - 
has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationUpgradeCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationUpgradeCompleted' # type: str self.application_type_name = application_type_name self.application_type_version = application_type_version self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ApplicationUpgradeCompleted' -class ApplicationUpgradeDescription(msrest.serialization.Model): - """Describes the parameters for an application upgrade. Note that upgrade description replaces the existing application description. This means that if the parameters are not specified, the existing parameters on the applications will be overwritten with the empty parameters list. This would result in the application using the default value of the parameters from the application manifest. If you do not want to change any existing parameter values, please get the application parameters first using the GetApplicationInfo query and then supply those values as Parameters in this ApplicationUpgradeDescription. +class ApplicationUpgradeDescription(Model): + """Describes the parameters for an application upgrade. Note that upgrade + description replaces the existing application description. This means that + if the parameters are not specified, the existing parameters on the + applications will be overwritten with the empty parameters list. This would + result in the application using the default value of the parameters from + the application manifest. 
If you do not want to change any existing + parameter values, please get the application parameters first using the + GetApplicationInfo query and then supply those values as Parameters in this + ApplicationUpgradeDescription. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the target application, including the 'fabric:' URI scheme. + :param name: Required. The name of the target application, including the + 'fabric:' URI scheme. :type name: str - :param target_application_type_version: Required. The target application type version (found in - the application manifest) for the application upgrade. + :param target_application_type_version: Required. The target application + type version (found in the application manifest) for the application + upgrade. :type target_application_type_version: str - :param parameters: List of application parameters with overridden values from their default - values specified in the application manifest. + :param parameters: List of application parameters with overridden values + from their default values specified in the application manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param upgrade_kind: Required. The kind of upgrade out of the following possible values. - Possible values include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. 
The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible - values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", - "ReverseLexicographical". Default value: "Default". + :param sort_order: Defines the order in which an upgrade proceeds through + the cluster. 
Possible values include: 'Invalid', 'Default', 'Numeric', + 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default + value: "Default" . :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a - stateless instance is closed, to allow the active requests to drain gracefully. This would be - effective when the instance is closing during the application/cluster - upgrade, only for those instances which have a non-zero delay duration configured in the - service description. See InstanceCloseDelayDurationSeconds property in $ref: + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param instance_close_delay_duration_in_seconds: Duration in seconds, to + wait before a stateless instance is closed, to allow the active requests + to drain gracefully. This would be effective when the instance is closing + during the application/cluster + upgrade, only for those instances which have a non-zero delay duration + configured in the service description. See + InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. 
- Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates - that the behavior will entirely depend on the delay configured in the stateless service - description. + Note, the default value of InstanceCloseDelayDurationInSeconds is + 4294967295, which indicates that the behavior will entirely depend on the + delay configured in the stateless service description. :type instance_close_delay_duration_in_seconds: long - :param managed_application_identity: Managed application identity description. + :param managed_application_identity: Managed application identity + description. :type managed_application_identity: ~azure.servicefabric.models.ManagedApplicationIdentityDescription """ @@ -3106,23 +2642,7 @@ class ApplicationUpgradeDescription(msrest.serialization.Model): 'managed_application_identity': {'key': 'ManagedApplicationIdentity', 'type': 'ManagedApplicationIdentityDescription'}, } - def __init__( - self, - *, - name: str, - target_application_type_version: str, - upgrade_kind: Union[str, "UpgradeKind"] = "Rolling", - parameters: Optional[List["ApplicationParameter"]] = None, - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, - force_restart: Optional[bool] = False, - sort_order: Optional[Union[str, "UpgradeSortOrder"]] = "Default", - monitoring_policy: Optional["MonitoringPolicyDescription"] = None, - application_health_policy: Optional["ApplicationHealthPolicy"] = None, - instance_close_delay_duration_in_seconds: Optional[int] = 4294967295, - managed_application_identity: Optional["ManagedApplicationIdentityDescription"] = None, - **kwargs - ): + def __init__(self, *, name: str, target_application_type_version: str, parameters=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, sort_order="Default", 
monitoring_policy=None, application_health_policy=None, instance_close_delay_duration_in_seconds: int=None, managed_application_identity=None, **kwargs) -> None: super(ApplicationUpgradeDescription, self).__init__(**kwargs) self.name = name self.target_application_type_version = target_application_type_version @@ -3143,63 +2663,47 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. 
:type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application type version. + :param current_application_type_version: Required. Current Application + type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type version. + :param application_type_version: Required. Target Application type + version. :type application_type_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain in milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain + in milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3210,11 +2714,11 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3224,91 +2728,86 @@ class ApplicationUpgradeDomainCompletedEvent(ApplicationEvent): 
'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - current_application_type_version: str, - application_type_version: str, - upgrade_state: str, - upgrade_domains: str, - upgrade_domain_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, upgrade_state: str, upgrade_domains: str, upgrade_domain_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationUpgradeDomainCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationUpgradeDomainCompleted' # type: str self.application_type_name = application_type_name self.current_application_type_version = current_application_type_version self.application_type_version = application_type_version self.upgrade_state = upgrade_state self.upgrade_domains = upgrade_domains self.upgrade_domain_elapsed_time_in_ms = upgrade_domain_elapsed_time_in_ms + self.kind = 'ApplicationUpgradeDomainCompleted' -class ApplicationUpgradeProgressInfo(msrest.serialization.Model): +class ApplicationUpgradeProgressInfo(Model): """Describes the parameters for an application upgrade. - :param name: The name of the target application, including the 'fabric:' URI scheme. + :param name: The name of the target application, including the 'fabric:' + URI scheme. :type name: str - :param type_name: The application type name as defined in the application manifest. 
+ :param type_name: The application type name as defined in the application + manifest. :type type_name: str - :param target_application_type_version: The target application type version (found in the - application manifest) for the application upgrade. + :param target_application_type_version: The target application type + version (found in the application manifest) for the application upgrade. :type target_application_type_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", - "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", - "RollingForwardInProgress", "RollingForwardCompleted", "Failed". + :param upgrade_state: The state of the upgrade domain. Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be processed. + :param next_upgrade_domain: The name of the next upgrade domain to be + processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Describes the parameters for an application upgrade. Note that - upgrade description replaces the existing application description. This means that if the - parameters are not specified, the existing parameters on the applications will be overwritten - with the empty parameters list. This would result in the application using the default value of - the parameters from the application manifest. If you do not want to change any existing - parameter values, please get the application parameters first using the GetApplicationInfo - query and then supply those values as Parameters in this ApplicationUpgradeDescription. - :type upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription - :param upgrade_duration_in_milliseconds: The estimated total amount of time spent processing - the overall upgrade. + :param upgrade_description: Describes the parameters for an application + upgrade. Note that upgrade description replaces the existing application + description. This means that if the parameters are not specified, the + existing parameters on the applications will be overwritten with the empty + parameters list. This would result in the application using the default + value of the parameters from the application manifest. If you do not want + to change any existing parameter values, please get the application + parameters first using the GetApplicationInfo query and then supply those + values as Parameters in this ApplicationUpgradeDescription. + :type upgrade_description: + ~azure.servicefabric.models.ApplicationUpgradeDescription + :param upgrade_duration_in_milliseconds: The estimated total amount of + time spent processing the overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated total amount of time spent - processing the current upgrade domain. 
+ :param upgrade_domain_duration_in_milliseconds: The estimated total amount + of time spent processing the current upgrade domain. :type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in the current - aggregated health state. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current in-progress upgrade - domain. + :param unhealthy_evaluations: List of health evaluations that resulted in + the current aggregated health state. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and - FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being - executed. Possible values include: "None", "Interrupted", "HealthCheck", - "UpgradeDomainTimeout", "OverallUpgradeTimeout". + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the - time of upgrade failure. 
+ :param upgrade_domain_progress_at_failure: Information about the upgrade + domain progress at the time of upgrade failure. :type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param upgrade_status_details: Additional detailed information about the status of the pending - upgrade. + :param upgrade_status_details: Additional detailed information about the + status of the pending upgrade. :type upgrade_status_details: str """ @@ -3332,28 +2831,7 @@ class ApplicationUpgradeProgressInfo(msrest.serialization.Model): 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - type_name: Optional[str] = None, - target_application_type_version: Optional[str] = None, - upgrade_domains: Optional[List["UpgradeDomainInfo"]] = None, - upgrade_state: Optional[Union[str, "UpgradeState"]] = None, - next_upgrade_domain: Optional[str] = None, - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - upgrade_description: Optional["ApplicationUpgradeDescription"] = None, - upgrade_duration_in_milliseconds: Optional[str] = None, - upgrade_domain_duration_in_milliseconds: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - current_upgrade_domain_progress: Optional["CurrentUpgradeDomainProgressInfo"] = None, - start_timestamp_utc: Optional[str] = None, - failure_timestamp_utc: Optional[str] = None, - failure_reason: Optional[Union[str, "FailureReason"]] = None, - upgrade_domain_progress_at_failure: Optional["FailureUpgradeDomainProgressInfo"] = None, - upgrade_status_details: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, type_name: str=None, target_application_type_version: str=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain: str=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds: 
str=None, upgrade_domain_duration_in_milliseconds: str=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, upgrade_status_details: str=None, **kwargs) -> None: super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs) self.name = name self.type_name = type_name @@ -3379,44 +2857,25 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str @@ -3424,14 +2883,15 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): :type application_type_version: str :param failure_reason: Required. Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, @@ -3440,11 +2900,11 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, @@ -3452,26 +2912,13 @@ class ApplicationUpgradeRollbackCompletedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - 
application_type_version: str, - failure_reason: str, - overall_upgrade_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationUpgradeRollbackCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationUpgradeRollbackCompleted' # type: str self.application_type_name = application_type_name self.application_type_version = application_type_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ApplicationUpgradeRollbackCompleted' class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): @@ -3479,61 +2926,45 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application type version. + :param current_application_type_version: Required. Current Application + type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type version. + :param application_type_version: Required. Target Application type + version. :type application_type_version: str :param failure_reason: Required. 
Describes reason of failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time in milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. :type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3543,11 +2974,11 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3556,28 +2987,14 @@ class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent): 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - current_application_type_version: str, - application_type_version: str, - failure_reason: str, - overall_upgrade_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, 
failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationUpgradeRollbackStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationUpgradeRollbackStarted' # type: str self.application_type_name = application_type_name self.current_application_type_version = current_application_type_version self.application_type_version = application_type_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ApplicationUpgradeRollbackStarted' class ApplicationUpgradeStartedEvent(ApplicationEvent): @@ -3585,50 +3002,33 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_type_name: Required. Application type name. :type application_type_name: str - :param current_application_type_version: Required. Current Application type version. + :param current_application_type_version: Required. Current Application + type version. :type current_application_type_version: str - :param application_type_version: Required. Target Application type version. + :param application_type_version: Required. Target Application type + version. :type application_type_version: str :param upgrade_type: Required. 
Type of upgrade. :type upgrade_type: str @@ -3639,9 +3039,9 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_type_name': {'required': True}, 'current_application_type_version': {'required': True}, @@ -3652,11 +3052,11 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, @@ -3666,48 +3066,37 @@ class ApplicationUpgradeStartedEvent(ApplicationEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_type_name: str, - current_application_type_version: str, - application_type_version: str, - upgrade_type: str, - rolling_upgrade_mode: str, - failure_action: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, upgrade_type: str, rolling_upgrade_mode: str, failure_action: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ApplicationUpgradeStartedEvent, self).__init__(event_instance_id=event_instance_id, 
category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'ApplicationUpgradeStarted' # type: str self.application_type_name = application_type_name self.current_application_type_version = current_application_type_version self.application_type_version = application_type_version self.upgrade_type = upgrade_type self.rolling_upgrade_mode = rolling_upgrade_mode self.failure_action = failure_action + self.kind = 'ApplicationUpgradeStarted' -class ApplicationUpgradeUpdateDescription(msrest.serialization.Model): +class ApplicationUpgradeUpdateDescription(Model): """Describes the parameters for updating an ongoing application upgrade. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the application, including the 'fabric:' URI scheme. + :param name: Required. The name of the application, including the + 'fabric:' URI scheme. :type name: str - :param upgrade_kind: Required. The kind of upgrade out of the following possible values. - Possible values include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :param update_description: Describes the parameters for updating a rolling upgrade of - application or cluster. - :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. 
+ :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param update_description: Describes the parameters for updating a rolling + upgrade of application or cluster. + :type update_description: + ~azure.servicefabric.models.RollingUpgradeUpdateDescription """ _validation = { @@ -3722,15 +3111,7 @@ class ApplicationUpgradeUpdateDescription(msrest.serialization.Model): 'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'}, } - def __init__( - self, - *, - name: str, - upgrade_kind: Union[str, "UpgradeKind"] = "Rolling", - application_health_policy: Optional["ApplicationHealthPolicy"] = None, - update_description: Optional["RollingUpgradeUpdateDescription"] = None, - **kwargs - ): + def __init__(self, *, name: str, upgrade_kind="Rolling", application_health_policy=None, update_description=None, **kwargs) -> None: super(ApplicationUpgradeUpdateDescription, self).__init__(**kwargs) self.name = name self.upgrade_kind = upgrade_kind @@ -3738,17 +3119,17 @@ def __init__( self.update_description = update_description -class AutoScalingMetric(msrest.serialization.Model): - """Describes the metric that is used for triggering auto scaling operation. Derived classes will describe resources or metrics. +class AutoScalingMetric(Model): + """Describes the metric that is used for triggering auto scaling operation. + Derived classes will describe resources or metrics. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AutoScalingResourceMetric. + sub-classes are: AutoScalingResourceMetric All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible - values include: "Resource". - :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -3763,25 +3144,23 @@ class AutoScalingMetric(msrest.serialization.Model): 'kind': {'Resource': 'AutoScalingResourceMetric'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(AutoScalingMetric, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None -class AutoScalingPolicy(msrest.serialization.Model): +class AutoScalingPolicy(Model): """Describes the auto scaling policy. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the auto scaling policy. :type name: str - :param trigger: Required. Determines when auto scaling operation will be invoked. - :type trigger: ~azure.servicefabric.models.AutoScalingTrigger - :param mechanism: Required. The mechanism that is used to scale when auto scaling operation is + :param trigger: Required. Determines when auto scaling operation will be invoked. + :type trigger: ~azure.servicefabric.models.AutoScalingTrigger + :param mechanism: Required. The mechanism that is used to scale when auto + scaling operation is invoked. :type mechanism: ~azure.servicefabric.models.AutoScalingMechanism """ @@ -3797,14 +3176,7 @@ class AutoScalingPolicy(msrest.serialization.Model): 'mechanism': {'key': 'mechanism', 'type': 'AutoScalingMechanism'}, } - def __init__( - self, - *, - name: str, - trigger: "AutoScalingTrigger", - mechanism: "AutoScalingMechanism", - **kwargs - ): + def __init__(self, *, name: str, trigger, mechanism, **kwargs) -> None: super(AutoScalingPolicy, self).__init__(**kwargs) self.name = name self.trigger = trigger @@ -3816,11 +3188,12 @@ class AutoScalingResourceMetric(AutoScalingMetric): All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling metric.Constant filled by server. Possible - values include: "Resource". - :type kind: str or ~azure.servicefabric.models.AutoScalingMetricKind - :param name: Required. 
Name of the resource. Possible values include: "cpu", "memoryInGB". - :type name: str or ~azure.servicefabric.models.AutoScalingResourceMetricName + :param kind: Required. Constant filled by server. + :type kind: str + :param name: Required. Name of the resource. Possible values include: + 'cpu', 'memoryInGB' + :type name: str or + ~azure.servicefabric.models.AutoScalingResourceMetricName """ _validation = { @@ -3833,28 +3206,22 @@ class AutoScalingResourceMetric(AutoScalingMetric): 'name': {'key': 'name', 'type': 'str'}, } - def __init__( - self, - *, - name: Union[str, "AutoScalingResourceMetricName"], - **kwargs - ): + def __init__(self, *, name, **kwargs) -> None: super(AutoScalingResourceMetric, self).__init__(**kwargs) - self.kind = 'Resource' # type: str self.name = name + self.kind = 'Resource' -class AutoScalingTrigger(msrest.serialization.Model): +class AutoScalingTrigger(Model): """Describes the trigger for performing auto scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AverageLoadScalingTrigger. + sub-classes are: AverageLoadScalingTrigger All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible - values include: "AverageLoad". - :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -3869,12 +3236,9 @@ class AutoScalingTrigger(msrest.serialization.Model): 'kind': {'AverageLoad': 'AverageLoadScalingTrigger'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(AutoScalingTrigger, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AverageLoadScalingTrigger(AutoScalingTrigger): @@ -3882,19 +3246,19 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of auto scaling trigger.Constant filled by server. Possible - values include: "AverageLoad". - :type kind: str or ~azure.servicefabric.models.AutoScalingTriggerKind - :param metric: Required. Description of the metric that is used for scaling. + :param kind: Required. Constant filled by server. + :type kind: str + :param metric: Required. Description of the metric that is used for + scaling. :type metric: ~azure.servicefabric.models.AutoScalingMetric - :param lower_load_threshold: Required. Lower load threshold (if average load is below this - threshold, service will scale down). + :param lower_load_threshold: Required. Lower load threshold (if average + load is below this threshold, service will scale down). :type lower_load_threshold: float - :param upper_load_threshold: Required. Upper load threshold (if average load is above this - threshold, service will scale up). + :param upper_load_threshold: Required. Upper load threshold (if average + load is above this threshold, service will scale up). :type upper_load_threshold: float - :param scale_interval_in_seconds: Required. Scale interval that indicates how often will this - trigger be checked. + :param scale_interval_in_seconds: Required. Scale interval that indicates + how often will this trigger be checked. 
:type scale_interval_in_seconds: int """ @@ -3914,34 +3278,26 @@ class AverageLoadScalingTrigger(AutoScalingTrigger): 'scale_interval_in_seconds': {'key': 'scaleIntervalInSeconds', 'type': 'int'}, } - def __init__( - self, - *, - metric: "AutoScalingMetric", - lower_load_threshold: float, - upper_load_threshold: float, - scale_interval_in_seconds: int, - **kwargs - ): + def __init__(self, *, metric, lower_load_threshold: float, upper_load_threshold: float, scale_interval_in_seconds: int, **kwargs) -> None: super(AverageLoadScalingTrigger, self).__init__(**kwargs) - self.kind = 'AverageLoad' # type: str self.metric = metric self.lower_load_threshold = lower_load_threshold self.upper_load_threshold = upper_load_threshold self.scale_interval_in_seconds = scale_interval_in_seconds + self.kind = 'AverageLoad' -class ScalingTriggerDescription(msrest.serialization.Model): +class ScalingTriggerDescription(Model): """Describes the trigger for performing a scaling operation. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AveragePartitionLoadScalingTrigger, AverageServiceLoadScalingTrigger. + sub-classes are: AveragePartitionLoadScalingTrigger, + AverageServiceLoadScalingTrigger All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. - Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". - :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -3956,32 +3312,30 @@ class ScalingTriggerDescription(msrest.serialization.Model): 'kind': {'AveragePartitionLoad': 'AveragePartitionLoadScalingTrigger', 'AverageServiceLoad': 'AverageServiceLoadScalingTrigger'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ScalingTriggerDescription, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling trigger related to an average load of a metric/resource of a partition. + """Represents a scaling trigger related to an average load of a + metric/resource of a partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. - Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". - :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind - :param metric_name: Required. The name of the metric for which usage should be tracked. + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. :type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below which a scale in - operation should be performed. + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out - operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made - whether to scale or not. 
+ :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. :type scale_interval_in_seconds: long """ @@ -4001,46 +3355,39 @@ class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, } - def __init__( - self, - *, - metric_name: str, - lower_load_threshold: str, - upper_load_threshold: str, - scale_interval_in_seconds: int, - **kwargs - ): + def __init__(self, *, metric_name: str, lower_load_threshold: str, upper_load_threshold: str, scale_interval_in_seconds: int, **kwargs) -> None: super(AveragePartitionLoadScalingTrigger, self).__init__(**kwargs) - self.kind = 'AveragePartitionLoad' # type: str self.metric_name = metric_name self.lower_load_threshold = lower_load_threshold self.upper_load_threshold = upper_load_threshold self.scale_interval_in_seconds = scale_interval_in_seconds + self.kind = 'AveragePartitionLoad' class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): - """Represents a scaling policy related to an average load of a metric/resource of a service. + """Represents a scaling policy related to an average load of a metric/resource + of a service. All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling trigger.Constant filled by server. - Possible values include: "Invalid", "AveragePartitionLoad", "AverageServiceLoad". - :type kind: str or ~azure.servicefabric.models.ScalingTriggerKind - :param metric_name: Required. The name of the metric for which usage should be tracked. + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. :type metric_name: str - :param lower_load_threshold: Required. The lower limit of the load below which a scale in - operation should be performed. + :param lower_load_threshold: Required. 
The lower limit of the load below + which a scale in operation should be performed. :type lower_load_threshold: str - :param upper_load_threshold: Required. The upper limit of the load beyond which a scale out - operation should be performed. + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. :type upper_load_threshold: str - :param scale_interval_in_seconds: Required. The period in seconds on which a decision is made - whether to scale or not. + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. :type scale_interval_in_seconds: long - :param use_only_primary_load: Required. Flag determines whether only the load of primary - replica should be considered for scaling. - If set to true, then trigger will only consider the load of primary replicas of stateful - service. + :param use_only_primary_load: Required. Flag determines whether only the + load of primary replica should be considered for scaling. + If set to true, then trigger will only consider the load of primary + replicas of stateful service. If set to false, trigger will consider load of all replicas. This parameter cannot be set to true for stateless service. 
:type use_only_primary_load: bool @@ -4064,39 +3411,30 @@ class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): 'use_only_primary_load': {'key': 'UseOnlyPrimaryLoad', 'type': 'bool'}, } - def __init__( - self, - *, - metric_name: str, - lower_load_threshold: str, - upper_load_threshold: str, - scale_interval_in_seconds: int, - use_only_primary_load: bool, - **kwargs - ): + def __init__(self, *, metric_name: str, lower_load_threshold: str, upper_load_threshold: str, scale_interval_in_seconds: int, use_only_primary_load: bool, **kwargs) -> None: super(AverageServiceLoadScalingTrigger, self).__init__(**kwargs) - self.kind = 'AverageServiceLoad' # type: str self.metric_name = metric_name self.lower_load_threshold = lower_load_threshold self.upper_load_threshold = upper_load_threshold self.scale_interval_in_seconds = scale_interval_in_seconds self.use_only_primary_load = use_only_primary_load + self.kind = 'AverageServiceLoad' -class BackupStorageDescription(msrest.serialization.Model): +class BackupStorageDescription(Model): """Describes the parameters for the backup storage. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AzureBlobBackupStorageDescription, DsmsAzureBlobBackupStorageDescription, FileShareBackupStorageDescription, ManagedIdentityAzureBlobBackupStorageDescription. + sub-classes are: AzureBlobBackupStorageDescription, + FileShareBackupStorageDescription, DsmsAzureBlobBackupStorageDescription, + ManagedIdentityAzureBlobBackupStorageDescription All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. 
:type friendly_name: str + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str """ _validation = { @@ -4104,40 +3442,35 @@ class BackupStorageDescription(msrest.serialization.Model): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, } _subtype_map = { - 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'ManagedIdentityAzureBlobStore': 'ManagedIdentityAzureBlobBackupStorageDescription'} + 'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription', 'DsmsAzureBlobStore': 'DsmsAzureBlobBackupStorageDescription', 'ManagedIdentityAzureBlobStore': 'ManagedIdentityAzureBlobBackupStorageDescription'} } - def __init__( - self, - *, - friendly_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, friendly_name: str=None, **kwargs) -> None: super(BackupStorageDescription, self).__init__(**kwargs) - self.storage_kind = None # type: Optional[str] self.friendly_name = friendly_name + self.storage_kind = None class AzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Azure blob store used for storing and enumerating backups. + """Describes the parameters for Azure blob store used for storing and + enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. 
:type friendly_name: str - :param connection_string: Required. The connection string to connect to the Azure blob store. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param connection_string: Required. The connection string to connect to + the Azure blob store. :type connection_string: str - :param container_name: Required. The name of the container in the blob store to store and - enumerate backups from. + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. :type container_name: str """ @@ -4148,41 +3481,34 @@ class AzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'connection_string': {'key': 'ConnectionString', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__( - self, - *, - connection_string: str, - container_name: str, - friendly_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, connection_string: str, container_name: str, friendly_name: str=None, **kwargs) -> None: super(AzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) - self.storage_kind = 'AzureBlobStore' # type: str self.connection_string = connection_string self.container_name = container_name + self.storage_kind = 'AzureBlobStore' -class DiagnosticsSinkProperties(msrest.serialization.Model): +class DiagnosticsSinkProperties(Model): """Properties of a DiagnosticsSink. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AzureInternalMonitoringPipelineSinkDescription. + sub-classes are: AzureInternalMonitoringPipelineSinkDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The kind of DiagnosticsSink.Constant filled by server. Possible values - include: "Invalid", "AzureInternalMonitoringPipeline". - :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind - :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. + :param name: Name of the sink. This value is referenced by + DiagnosticsReferenceDescription :type name: str :param description: A description of the sink. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -4190,26 +3516,20 @@ class DiagnosticsSinkProperties(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, } _subtype_map = { 'kind': {'AzureInternalMonitoringPipeline': 'AzureInternalMonitoringPipelineSinkDescription'} } - def __init__( - self, - *, - name: Optional[str] = None, - description: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, description: str=None, **kwargs) -> None: super(DiagnosticsSinkProperties, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.name = name self.description = description + self.kind = None class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): @@ -4217,23 +3537,24 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of DiagnosticsSink.Constant filled by server. Possible values - include: "Invalid", "AzureInternalMonitoringPipeline". - :type kind: str or ~azure.servicefabric.models.DiagnosticsSinkKind - :param name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription. + :param name: Name of the sink. 
This value is referenced by + DiagnosticsReferenceDescription :type name: str :param description: A description of the sink. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param account_name: Azure Internal monitoring pipeline account. :type account_name: str :param namespace: Azure Internal monitoring pipeline account namespace. :type namespace: str :param ma_config_url: Azure Internal monitoring agent configuration. :type ma_config_url: str - :param fluentd_config_url: Azure Internal monitoring agent fluentd configuration. + :param fluentd_config_url: Azure Internal monitoring agent fluentd + configuration. :type fluentd_config_url: str - :param auto_key_config_url: Azure Internal monitoring pipeline autokey associated with the - certificate. + :param auto_key_config_url: Azure Internal monitoring pipeline autokey + associated with the certificate. :type auto_key_config_url: str """ @@ -4242,9 +3563,9 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): } _attribute_map = { - 'kind': {'key': 'kind', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, 'account_name': {'key': 'accountName', 'type': 'str'}, 'namespace': {'key': 'namespace', 'type': 'str'}, 'ma_config_url': {'key': 'maConfigUrl', 'type': 'str'}, @@ -4252,57 +3573,53 @@ class AzureInternalMonitoringPipelineSinkDescription(DiagnosticsSinkProperties): 'auto_key_config_url': {'key': 'autoKeyConfigUrl', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - description: Optional[str] = None, - account_name: Optional[str] = None, - namespace: Optional[str] = None, - ma_config_url: Optional[str] = None, - fluentd_config_url: Optional[str] = None, - auto_key_config_url: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, description: str=None, account_name: str=None, namespace: 
str=None, ma_config_url: str=None, fluentd_config_url: str=None, auto_key_config_url: str=None, **kwargs) -> None: super(AzureInternalMonitoringPipelineSinkDescription, self).__init__(name=name, description=description, **kwargs) - self.kind = 'AzureInternalMonitoringPipeline' # type: str self.account_name = account_name self.namespace = namespace self.ma_config_url = ma_config_url self.fluentd_config_url = fluentd_config_url self.auto_key_config_url = auto_key_config_url + self.kind = 'AzureInternalMonitoringPipeline' -class BackupInfo(msrest.serialization.Model): +class BackupInfo(Model): """Represents a backup point which can be used to trigger a restore. :param backup_id: Unique backup ID . :type backup_id: str - :param backup_chain_id: Unique backup chain ID. All backups part of the same chain has the same - backup chain id. A backup chain is comprised of 1 full backup and multiple incremental backups. + :param backup_chain_id: Unique backup chain ID. All backups part of the + same chain has the same backup chain id. A backup chain is comprised of 1 + full backup and multiple incremental backups. :type backup_chain_id: str - :param application_name: Name of the Service Fabric application this partition backup belongs - to. + :param application_name: Name of the Service Fabric application this + partition backup belongs to. :type application_name: str - :param service_name: Name of the Service Fabric service this partition backup belongs to. + :param service_name: Name of the Service Fabric service this partition + backup belongs to. :type service_name: str - :param partition_information: Information about the partition to which this backup belongs to. - :type partition_information: ~azure.servicefabric.models.PartitionInformation - :param backup_location: Location of the backup, relative to the backup store. 
+ :param partition_information: Information about the partition to which + this backup belongs to + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param backup_location: Location of the backup, relative to the backup + store. :type backup_location: str - :param backup_type: Describes the type of backup, whether its full or incremental. Possible - values include: "Invalid", "Full", "Incremental". + :param backup_type: Describes the type of backup, whether its full or + incremental. Possible values include: 'Invalid', 'Full', 'Incremental' :type backup_type: str or ~azure.servicefabric.models.BackupType - :param epoch_of_last_backup_record: Epoch of the last record in this backup. + :param epoch_of_last_backup_record: Epoch of the last record in this + backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch :param lsn_of_last_backup_record: LSN of the last record in this backup. :type lsn_of_last_backup_record: str :param creation_time_utc: The date time when this backup was taken. - :type creation_time_utc: ~datetime.datetime - :param service_manifest_version: Manifest Version of the service this partition backup belongs - to. + :type creation_time_utc: datetime + :param service_manifest_version: Manifest Version of the service this + partition backup belongs to. :type service_manifest_version: str - :param failure_error: Denotes the failure encountered in getting backup point information. + :param failure_error: Denotes the failure encountered in getting backup + point information. 
:type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -4321,23 +3638,7 @@ class BackupInfo(msrest.serialization.Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__( - self, - *, - backup_id: Optional[str] = None, - backup_chain_id: Optional[str] = None, - application_name: Optional[str] = None, - service_name: Optional[str] = None, - partition_information: Optional["PartitionInformation"] = None, - backup_location: Optional[str] = None, - backup_type: Optional[Union[str, "BackupType"]] = None, - epoch_of_last_backup_record: Optional["Epoch"] = None, - lsn_of_last_backup_record: Optional[str] = None, - creation_time_utc: Optional[datetime.datetime] = None, - service_manifest_version: Optional[str] = None, - failure_error: Optional["FabricErrorError"] = None, - **kwargs - ): + def __init__(self, *, backup_id: str=None, backup_chain_id: str=None, application_name: str=None, service_name: str=None, partition_information=None, backup_location: str=None, backup_type=None, epoch_of_last_backup_record=None, lsn_of_last_backup_record: str=None, creation_time_utc=None, service_manifest_version: str=None, failure_error=None, **kwargs) -> None: super(BackupInfo, self).__init__(**kwargs) self.backup_id = backup_id self.backup_chain_id = backup_chain_id @@ -4353,10 +3654,11 @@ def __init__( self.failure_error = failure_error -class BackupPartitionDescription(msrest.serialization.Model): +class BackupPartitionDescription(Model): """Describes the parameters for triggering partition's backup. - :param backup_storage: Specifies the details of the backup storage where to save the backup. + :param backup_storage: Specifies the details of the backup storage where + to save the backup. 
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -4364,42 +3666,39 @@ class BackupPartitionDescription(msrest.serialization.Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__( - self, - *, - backup_storage: Optional["BackupStorageDescription"] = None, - **kwargs - ): + def __init__(self, *, backup_storage=None, **kwargs) -> None: super(BackupPartitionDescription, self).__init__(**kwargs) self.backup_storage = backup_storage -class BackupPolicyDescription(msrest.serialization.Model): +class BackupPolicyDescription(Model): """Describes a backup policy for configuring periodic backup. All required parameters must be populated in order to send to Azure. :param name: Required. The unique name identifying this backup policy. :type name: str - :param auto_restore_on_data_loss: Required. Specifies whether to trigger restore automatically - using the latest available backup in case the partition experiences a data loss event. + :param auto_restore_on_data_loss: Required. Specifies whether to trigger + restore automatically using the latest available backup in case the + partition experiences a data loss event. :type auto_restore_on_data_loss: bool - :param max_incremental_backups: Required. Defines the maximum number of incremental backups to - be taken between two full backups. This is just the upper limit. A full backup may be taken - before specified number of incremental backups are completed in one of the following conditions - - - * The replica has never taken a full backup since it has become primary, - * Some of the log records since the last backup has been truncated, or - * Replica passed the MaxAccumulatedBackupLogSizeInMB limit. + :param max_incremental_backups: Required. Defines the maximum number of + incremental backups to be taken between two full backups. This is just the + upper limit. 
A full backup may be taken before specified number of + incremental backups are completed in one of the following conditions + - The replica has never taken a full backup since it has become primary, + - Some of the log records since the last backup has been truncated, or + - Replica passed the MaxAccumulatedBackupLogSizeInMB limit. :type max_incremental_backups: int :param schedule: Required. Describes the backup schedule parameters. :type schedule: ~azure.servicefabric.models.BackupScheduleDescription - :param storage: Required. Describes the details of backup storage where to store the periodic - backups. + :param storage: Required. Describes the details of backup storage where to + store the periodic backups. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param retention_policy: Describes the policy to retain backups in storage. - :type retention_policy: ~azure.servicefabric.models.RetentionPolicyDescription + :param retention_policy: Describes the policy to retain backups in + storage. 
+ :type retention_policy: + ~azure.servicefabric.models.RetentionPolicyDescription """ _validation = { @@ -4419,17 +3718,7 @@ class BackupPolicyDescription(msrest.serialization.Model): 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicyDescription'}, } - def __init__( - self, - *, - name: str, - auto_restore_on_data_loss: bool, - max_incremental_backups: int, - schedule: "BackupScheduleDescription", - storage: "BackupStorageDescription", - retention_policy: Optional["RetentionPolicyDescription"] = None, - **kwargs - ): + def __init__(self, *, name: str, auto_restore_on_data_loss: bool, max_incremental_backups: int, schedule, storage, retention_policy=None, **kwargs) -> None: super(BackupPolicyDescription, self).__init__(**kwargs) self.name = name self.auto_restore_on_data_loss = auto_restore_on_data_loss @@ -4439,23 +3728,29 @@ def __init__( self.retention_policy = retention_policy -class BackupProgressInfo(msrest.serialization.Model): +class BackupProgressInfo(Model): """Describes the progress of a partition's backup. - :param backup_state: Represents the current state of the partition backup operation. Possible - values include: "Invalid", "Accepted", "BackupInProgress", "Success", "Failure", "Timeout". + :param backup_state: Represents the current state of the partition backup + operation. Possible values include: 'Invalid', 'Accepted', + 'BackupInProgress', 'Success', 'Failure', 'Timeout' :type backup_state: str or ~azure.servicefabric.models.BackupState - :param time_stamp_utc: TimeStamp in UTC when operation succeeded or failed. - :type time_stamp_utc: ~datetime.datetime + :param time_stamp_utc: TimeStamp in UTC when operation succeeded or + failed. + :type time_stamp_utc: datetime :param backup_id: Unique ID of the newly created backup. :type backup_id: str - :param backup_location: Location, relative to the backup store, of the newly created backup. 
+ :param backup_location: Location, relative to the backup store, of the + newly created backup. :type backup_location: str - :param epoch_of_last_backup_record: Specifies the epoch of the last record included in backup. + :param epoch_of_last_backup_record: Specifies the epoch of the last record + included in backup. :type epoch_of_last_backup_record: ~azure.servicefabric.models.Epoch - :param lsn_of_last_backup_record: The LSN of last record included in backup. + :param lsn_of_last_backup_record: The LSN of last record included in + backup. :type lsn_of_last_backup_record: str - :param failure_error: Denotes the failure encountered in performing backup operation. + :param failure_error: Denotes the failure encountered in performing backup + operation. :type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -4469,18 +3764,7 @@ class BackupProgressInfo(msrest.serialization.Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__( - self, - *, - backup_state: Optional[Union[str, "BackupState"]] = None, - time_stamp_utc: Optional[datetime.datetime] = None, - backup_id: Optional[str] = None, - backup_location: Optional[str] = None, - epoch_of_last_backup_record: Optional["Epoch"] = None, - lsn_of_last_backup_record: Optional[str] = None, - failure_error: Optional["FabricErrorError"] = None, - **kwargs - ): + def __init__(self, *, backup_state=None, time_stamp_utc=None, backup_id: str=None, backup_location: str=None, epoch_of_last_backup_record=None, lsn_of_last_backup_record: str=None, failure_error=None, **kwargs) -> None: super(BackupProgressInfo, self).__init__(**kwargs) self.backup_state = backup_state self.time_stamp_utc = time_stamp_utc @@ -4491,18 +3775,17 @@ def __init__( self.failure_error = failure_error -class BackupScheduleDescription(msrest.serialization.Model): +class BackupScheduleDescription(Model): """Describes the backup schedule parameters. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FrequencyBasedBackupScheduleDescription, TimeBasedBackupScheduleDescription. + sub-classes are: FrequencyBasedBackupScheduleDescription, + TimeBasedBackupScheduleDescription All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. The kind of backup schedule, time based or frequency - based.Constant filled by server. Possible values include: "Invalid", "TimeBased", - "FrequencyBased". - :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str """ _validation = { @@ -4517,22 +3800,22 @@ class BackupScheduleDescription(msrest.serialization.Model): 'schedule_kind': {'FrequencyBased': 'FrequencyBasedBackupScheduleDescription', 'TimeBased': 'TimeBasedBackupScheduleDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(BackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = None # type: Optional[str] + self.schedule_kind = None -class BackupSuspensionInfo(msrest.serialization.Model): +class BackupSuspensionInfo(Model): """Describes the backup suspension details. - :param is_suspended: Indicates whether periodic backup is suspended at this level or not. + :param is_suspended: Indicates whether periodic backup is suspended at + this level or not. :type is_suspended: bool - :param suspension_inherited_from: Specifies the scope at which the backup suspension was - applied. Possible values include: "Invalid", "Partition", "Service", "Application". - :type suspension_inherited_from: str or ~azure.servicefabric.models.BackupSuspensionScope + :param suspension_inherited_from: Specifies the scope at which the backup + suspension was applied. 
Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type suspension_inherited_from: str or + ~azure.servicefabric.models.BackupSuspensionScope """ _attribute_map = { @@ -4540,30 +3823,22 @@ class BackupSuspensionInfo(msrest.serialization.Model): 'suspension_inherited_from': {'key': 'SuspensionInheritedFrom', 'type': 'str'}, } - def __init__( - self, - *, - is_suspended: Optional[bool] = None, - suspension_inherited_from: Optional[Union[str, "BackupSuspensionScope"]] = None, - **kwargs - ): + def __init__(self, *, is_suspended: bool=None, suspension_inherited_from=None, **kwargs) -> None: super(BackupSuspensionInfo, self).__init__(**kwargs) self.is_suspended = is_suspended self.suspension_inherited_from = suspension_inherited_from -class RetentionPolicyDescription(msrest.serialization.Model): +class RetentionPolicyDescription(Model): """Describes the retention policy configured. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BasicRetentionPolicyDescription. + sub-classes are: BasicRetentionPolicyDescription All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" - retention policy is supported.Constant filled by server. Possible values include: "Basic", - "Invalid". - :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType + :param retention_policy_type: Required. Constant filled by server. 
+ :type retention_policy_type: str """ _validation = { @@ -4578,12 +3853,9 @@ class RetentionPolicyDescription(msrest.serialization.Model): 'retention_policy_type': {'Basic': 'BasicRetentionPolicyDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(RetentionPolicyDescription, self).__init__(**kwargs) - self.retention_policy_type = None # type: Optional[str] + self.retention_policy_type = None class BasicRetentionPolicyDescription(RetentionPolicyDescription): @@ -4591,17 +3863,16 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): All required parameters must be populated in order to send to Azure. - :param retention_policy_type: Required. The type of retention policy. Currently only "Basic" - retention policy is supported.Constant filled by server. Possible values include: "Basic", - "Invalid". - :type retention_policy_type: str or ~azure.servicefabric.models.RetentionPolicyType - :param retention_duration: Required. It is the minimum duration for which a backup created, - will remain stored in the storage and might get deleted after that span of time. It should be - specified in ISO8601 format. - :type retention_duration: ~datetime.timedelta - :param minimum_number_of_backups: It is the minimum number of backups to be retained at any - point of time. If specified with a non zero value, backups will not be deleted even if the - backups have gone past retention duration and have number of backups less than or equal to it. + :param retention_policy_type: Required. Constant filled by server. + :type retention_policy_type: str + :param retention_duration: Required. It is the minimum duration for which + a backup created, will remain stored in the storage and might get deleted + after that span of time. It should be specified in ISO8601 format. + :type retention_duration: timedelta + :param minimum_number_of_backups: It is the minimum number of backups to + be retained at any point of time. 
If specified with a non zero value, + backups will not be deleted even if the backups have gone past retention + duration and have number of backups less than or equal to it. :type minimum_number_of_backups: int """ @@ -4617,31 +3888,24 @@ class BasicRetentionPolicyDescription(RetentionPolicyDescription): 'minimum_number_of_backups': {'key': 'MinimumNumberOfBackups', 'type': 'int'}, } - def __init__( - self, - *, - retention_duration: datetime.timedelta, - minimum_number_of_backups: Optional[int] = None, - **kwargs - ): + def __init__(self, *, retention_duration, minimum_number_of_backups: int=None, **kwargs) -> None: super(BasicRetentionPolicyDescription, self).__init__(**kwargs) - self.retention_policy_type = 'Basic' # type: str self.retention_duration = retention_duration self.minimum_number_of_backups = minimum_number_of_backups + self.retention_policy_type = 'Basic' -class PropertyValue(msrest.serialization.Model): +class PropertyValue(Model): """Describes a Service Fabric property value. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: BinaryPropertyValue, DoublePropertyValue, GuidPropertyValue, Int64PropertyValue, StringPropertyValue. + sub-classes are: BinaryPropertyValue, Int64PropertyValue, + DoublePropertyValue, StringPropertyValue, GuidPropertyValue All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -4653,15 +3917,12 @@ class PropertyValue(msrest.serialization.Model): } _subtype_map = { - 'kind': {'Binary': 'BinaryPropertyValue', 'Double': 'DoublePropertyValue', 'Guid': 'GuidPropertyValue', 'Int64': 'Int64PropertyValue', 'String': 'StringPropertyValue'} + 'kind': {'Binary': 'BinaryPropertyValue', 'Int64': 'Int64PropertyValue', 'Double': 'DoublePropertyValue', 'String': 'StringPropertyValue', 'Guid': 'GuidPropertyValue'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(PropertyValue, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class BinaryPropertyValue(PropertyValue): @@ -4669,12 +3930,10 @@ class BinaryPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind - :param data: Required. Array of bytes to be sent as an integer array. Each element of array is - a number between 0 and 255. + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. Array of bytes to be sent as an integer array. Each + element of array is a number between 0 and 255. :type data: list[int] """ @@ -4688,28 +3947,25 @@ class BinaryPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': '[int]'}, } - def __init__( - self, - *, - data: List[int], - **kwargs - ): + def __init__(self, *, data, **kwargs) -> None: super(BinaryPropertyValue, self).__init__(**kwargs) - self.kind = 'Binary' # type: str self.data = data + self.kind = 'Binary' -class Chaos(msrest.serialization.Model): +class Chaos(Model): """Contains a description of Chaos. 
- :param chaos_parameters: If Chaos is running, these are the parameters Chaos is running with. + :param chaos_parameters: If Chaos is running, these are the parameters + Chaos is running with. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param status: Current status of the Chaos run. Possible values include: "Invalid", "Running", - "Stopped". + :param status: Current status of the Chaos run. Possible values include: + 'Invalid', 'Running', 'Stopped' :type status: str or ~azure.servicefabric.models.ChaosStatus - :param schedule_status: Current status of the schedule. Possible values include: "Invalid", - "Stopped", "Active", "Expired", "Pending". - :type schedule_status: str or ~azure.servicefabric.models.ChaosScheduleStatus + :param schedule_status: Current status of the schedule. Possible values + include: 'Invalid', 'Stopped', 'Active', 'Expired', 'Pending' + :type schedule_status: str or + ~azure.servicefabric.models.ChaosScheduleStatus """ _attribute_map = { @@ -4718,14 +3974,7 @@ class Chaos(msrest.serialization.Model): 'schedule_status': {'key': 'ScheduleStatus', 'type': 'str'}, } - def __init__( - self, - *, - chaos_parameters: Optional["ChaosParameters"] = None, - status: Optional[Union[str, "ChaosStatus"]] = None, - schedule_status: Optional[Union[str, "ChaosScheduleStatus"]] = None, - **kwargs - ): + def __init__(self, *, chaos_parameters=None, status=None, schedule_status=None, **kwargs) -> None: super(Chaos, self).__init__(**kwargs) self.chaos_parameters = chaos_parameters self.status = status @@ -4737,44 +3986,25 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -4786,14 +4016,15 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): :type service_manifest_name: str :param code_package_name: Required. Code package name. :type code_package_name: str - :param service_package_activation_id: Required. Id of Service package activation. + :param service_package_activation_id: Required. Id of Service package + activation. 
:type service_package_activation_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -4804,11 +4035,11 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -4818,38 +4049,27 @@ class ChaosCodePackageRestartScheduledEvent(ApplicationEvent): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - fault_group_id: str, - fault_id: str, - node_name: str, - service_manifest_name: str, - code_package_name: str, - service_package_activation_id: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, fault_group_id: str, fault_id: str, node_name: str, service_manifest_name: str, code_package_name: str, service_package_activation_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosCodePackageRestartScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 
'ChaosCodePackageRestartScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.node_name = node_name self.service_manifest_name = service_manifest_name self.code_package_name = code_package_name self.service_package_activation_id = service_package_activation_id + self.kind = 'ChaosCodePackageRestartScheduled' -class ChaosContext(msrest.serialization.Model): - """Describes a map, which is a collection of (string, string) type key-value pairs. The map can be used to record information about -the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be at most 4095 characters long. -This map is set by the starter of the Chaos run to optionally store the context about the specific run. +class ChaosContext(Model): + """Describes a map, which is a collection of (string, string) type key-value + pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string + (key or value) can be at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the + context about the specific run. - :param map: Describes a map that contains a collection of ChaosContextMapItem's. + :param map: Describes a map that contains a collection of + ChaosContextMapItem's. :type map: dict[str, str] """ @@ -4857,65 +4077,58 @@ class ChaosContext(msrest.serialization.Model): 'map': {'key': 'Map', 'type': '{str}'}, } - def __init__( - self, - *, - map: Optional[Dict[str, str]] = None, - **kwargs - ): + def __init__(self, *, map=None, **kwargs) -> None: super(ChaosContext, self).__init__(**kwargs) self.map = map -class ChaosEvent(msrest.serialization.Model): +class ChaosEvent(Model): """Represents an event generated during a Chaos run. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, WaitingChaosEvent. + sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, + StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, + WaitingChaosEvent All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'ExecutingFaults': 'ExecutingFaultsChaosEvent', 'Started': 'StartedChaosEvent', 'Stopped': 'StoppedChaosEvent', 'TestError': 'TestErrorChaosEvent', 'ValidationFailed': 'ValidationFailedChaosEvent', 'Waiting': 'WaitingChaosEvent'} } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - **kwargs - ): + def __init__(self, *, time_stamp_utc, **kwargs) -> None: super(ChaosEvent, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.time_stamp_utc = time_stamp_utc + self.kind = None -class ChaosEventsSegment(msrest.serialization.Model): - """Contains the list of Chaos events and the continuation token to get the next segment. 
+class ChaosEventsSegment(Model): + """Contains the list of Chaos events and the continuation token to get the + next segment. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param history: List of Chaos events that meet the user-supplied criteria. :type history: list[~azure.servicefabric.models.ChaosEventWrapper] @@ -4926,19 +4139,13 @@ class ChaosEventsSegment(msrest.serialization.Model): 'history': {'key': 'History', 'type': '[ChaosEventWrapper]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - history: Optional[List["ChaosEventWrapper"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, history=None, **kwargs) -> None: super(ChaosEventsSegment, self).__init__(**kwargs) self.continuation_token = continuation_token self.history = history -class ChaosEventWrapper(msrest.serialization.Model): +class ChaosEventWrapper(Model): """Wrapper object for Chaos event. :param chaos_event: Represents an event generated during a Chaos run. 
@@ -4949,12 +4156,7 @@ class ChaosEventWrapper(msrest.serialization.Model): 'chaos_event': {'key': 'ChaosEvent', 'type': 'ChaosEvent'}, } - def __init__( - self, - *, - chaos_event: Optional["ChaosEvent"] = None, - **kwargs - ): + def __init__(self, *, chaos_event=None, **kwargs) -> None: super(ChaosEventWrapper, self).__init__(**kwargs) self.chaos_event = chaos_event @@ -4963,79 +4165,54 @@ class NodeEvent(FabricEvent): """Represents the base for all Node Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosNodeRestartScheduledEvent, NodeAbortedEvent, NodeAddedToClusterEvent, NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, NodeDownEvent, NodeHealthReportExpiredEvent, NodeNewHealthReportEvent, NodeOpenFailedEvent, NodeOpenSucceededEvent, NodeRemovedFromClusterEvent, NodeUpEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: NodeAbortedEvent, NodeAddedToClusterEvent, + NodeClosedEvent, NodeDeactivateCompletedEvent, NodeDeactivateStartedEvent, + NodeDownEvent, NodeNewHealthReportEvent, NodeHealthReportExpiredEvent, + NodeOpenSucceededEvent, NodeOpenFailedEvent, NodeRemovedFromClusterEvent, + NodeUpEvent, ChaosNodeRestartScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. 
:type node_name: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, } _subtype_map = { - 'kind': {'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent', 'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent'} - } - - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + 'kind': {'NodeAborted': 'NodeAbortedEvent', 'NodeAddedToCluster': 'NodeAddedToClusterEvent', 'NodeClosed': 'NodeClosedEvent', 'NodeDeactivateCompleted': 'NodeDeactivateCompletedEvent', 'NodeDeactivateStarted': 'NodeDeactivateStartedEvent', 'NodeDown': 'NodeDownEvent', 'NodeNewHealthReport': 'NodeNewHealthReportEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeOpenSucceeded': 'NodeOpenSucceededEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeRemovedFromCluster': 'NodeRemovedFromClusterEvent', 'NodeUp': 'NodeUpEvent', 
'ChaosNodeRestartScheduled': 'ChaosNodeRestartScheduledEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'NodeEvent' # type: str self.node_name = node_name + self.kind = 'NodeEvent' class ChaosNodeRestartScheduledEvent(NodeEvent): @@ -5043,38 +4220,18 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - 
"StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. 
@@ -5086,9 +4243,9 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -5096,88 +4253,85 @@ class ChaosNodeRestartScheduledEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance_id: int, - fault_group_id: str, - fault_id: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, fault_group_id: str, fault_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosNodeRestartScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'ChaosNodeRestartScheduled' # type: str self.node_instance_id = node_instance_id self.fault_group_id = fault_group_id self.fault_id = fault_id + self.kind = 'ChaosNodeRestartScheduled' -class ChaosParameters(msrest.serialization.Model): +class ChaosParameters(Model): """Defines all the parameters to configure a Chaos run. 
- :param time_to_run_in_seconds: Total time (in seconds) for which Chaos will run before - automatically stopping. The maximum allowed value is 4,294,967,295 (System.UInt32.MaxValue). + :param time_to_run_in_seconds: Total time (in seconds) for which Chaos + will run before automatically stopping. The maximum allowed value is + 4,294,967,295 (System.UInt32.MaxValue). Default value: "4294967295" . :type time_to_run_in_seconds: str - :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of time to wait for all - cluster entities to become stable and healthy. Chaos executes in iterations and at the start of - each iteration it validates the health of cluster entities. + :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of + time to wait for all cluster entities to become stable and healthy. Chaos + executes in iterations and at the start of each iteration it validates the + health of cluster entities. During validation if a cluster entity is not stable and healthy within - MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation failed event. + MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation + failed event. Default value: 60 . :type max_cluster_stabilization_timeout_in_seconds: long - :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of concurrent faults - induced per iteration. - Chaos executes in iterations and two consecutive iterations are separated by a validation - phase. - The higher the concurrency, the more aggressive the injection of faults, leading to inducing - more complex series of states to uncover bugs. - The recommendation is to start with a value of 2 or 3 and to exercise caution while moving up. + :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of + concurrent faults induced per iteration. + Chaos executes in iterations and two consecutive iterations are separated + by a validation phase. 
+ The higher the concurrency, the more aggressive the injection of faults, + leading to inducing more complex series of states to uncover bugs. + The recommendation is to start with a value of 2 or 3 and to exercise + caution while moving up. Default value: 1 . :type max_concurrent_faults: long - :param enable_move_replica_faults: Enables or disables the move primary and move secondary - faults. + :param enable_move_replica_faults: Enables or disables the move primary + and move secondary faults. Default value: True . :type enable_move_replica_faults: bool - :param wait_time_between_faults_in_seconds: Wait time (in seconds) between consecutive faults - within a single iteration. - The larger the value, the lower the overlapping between faults and the simpler the sequence of - state transitions that the cluster goes through. - The recommendation is to start with a value between 1 and 5 and exercise caution while moving - up. + :param wait_time_between_faults_in_seconds: Wait time (in seconds) between + consecutive faults within a single iteration. + The larger the value, the lower the overlapping between faults and the + simpler the sequence of state transitions that the cluster goes through. + The recommendation is to start with a value between 1 and 5 and exercise + caution while moving up. Default value: 20 . :type wait_time_between_faults_in_seconds: long - :param wait_time_between_iterations_in_seconds: Time-separation (in seconds) between two - consecutive iterations of Chaos. - The larger the value, the lower the fault injection rate. + :param wait_time_between_iterations_in_seconds: Time-separation (in + seconds) between two consecutive iterations of Chaos. + The larger the value, the lower the fault injection rate. Default value: + 30 . :type wait_time_between_iterations_in_seconds: long - :param cluster_health_policy: Passed-in cluster health policy is used to validate health of the - cluster in between Chaos iterations. 
If the cluster health is in error or if an unexpected - exception happens during fault execution--to provide the cluster with some time to - recuperate--Chaos will wait for 30 minutes before the next health-check. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param context: Describes a map, which is a collection of (string, string) type key-value - pairs. The map can be used to record information about - the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be - at most 4095 characters long. - This map is set by the starter of the Chaos run to optionally store the context about the - specific run. + :param cluster_health_policy: Passed-in cluster health policy is used to + validate health of the cluster in between Chaos iterations. If the cluster + health is in error or if an unexpected exception happens during fault + execution--to provide the cluster with some time to recuperate--Chaos will + wait for 30 minutes before the next health-check. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param context: Describes a map, which is a collection of (string, string) + type key-value pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string + (key or value) can be at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the + context about the specific run. :type context: ~azure.servicefabric.models.ChaosContext - :param chaos_target_filter: List of cluster entities to target for Chaos faults. - This filter can be used to target Chaos faults only to certain node types or only to certain - application instances. If ChaosTargetFilter is not used, Chaos faults all cluster entities. - If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter - specification. 
+ :param chaos_target_filter: List of cluster entities to target for Chaos + faults. + This filter can be used to target Chaos faults only to certain node types + or only to certain application instances. If ChaosTargetFilter is not + used, Chaos faults all cluster entities. + If ChaosTargetFilter is used, Chaos faults only the entities that meet the + ChaosTargetFilter specification. :type chaos_target_filter: ~azure.servicefabric.models.ChaosTargetFilter """ @@ -5200,20 +4354,7 @@ class ChaosParameters(msrest.serialization.Model): 'chaos_target_filter': {'key': 'ChaosTargetFilter', 'type': 'ChaosTargetFilter'}, } - def __init__( - self, - *, - time_to_run_in_seconds: Optional[str] = "4294967295", - max_cluster_stabilization_timeout_in_seconds: Optional[int] = 60, - max_concurrent_faults: Optional[int] = 1, - enable_move_replica_faults: Optional[bool] = True, - wait_time_between_faults_in_seconds: Optional[int] = 20, - wait_time_between_iterations_in_seconds: Optional[int] = 30, - cluster_health_policy: Optional["ClusterHealthPolicy"] = None, - context: Optional["ChaosContext"] = None, - chaos_target_filter: Optional["ChaosTargetFilter"] = None, - **kwargs - ): + def __init__(self, *, time_to_run_in_seconds: str="4294967295", max_cluster_stabilization_timeout_in_seconds: int=60, max_concurrent_faults: int=1, enable_move_replica_faults: bool=True, wait_time_between_faults_in_seconds: int=20, wait_time_between_iterations_in_seconds: int=30, cluster_health_policy=None, context=None, chaos_target_filter=None, **kwargs) -> None: super(ChaosParameters, self).__init__(**kwargs) self.time_to_run_in_seconds = time_to_run_in_seconds self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds @@ -5226,15 +4367,16 @@ def __init__( self.chaos_target_filter = chaos_target_filter -class ChaosParametersDictionaryItem(msrest.serialization.Model): +class ChaosParametersDictionaryItem(Model): """Defines an item in ChaosParametersDictionary of the 
Chaos Schedule. All required parameters must be populated in order to send to Azure. - :param key: Required. The key identifying the Chaos Parameter in the dictionary. This key is - referenced by Chaos Schedule Jobs. + :param key: Required. The key identifying the Chaos Parameter in the + dictionary. This key is referenced by Chaos Schedule Jobs. :type key: str - :param value: Required. Defines all the parameters to configure a Chaos run. + :param value: Required. Defines all the parameters to configure a Chaos + run. :type value: ~azure.servicefabric.models.ChaosParameters """ @@ -5248,13 +4390,7 @@ class ChaosParametersDictionaryItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'ChaosParameters'}, } - def __init__( - self, - *, - key: str, - value: "ChaosParameters", - **kwargs - ): + def __init__(self, *, key: str, value, **kwargs) -> None: super(ChaosParametersDictionaryItem, self).__init__(**kwargs) self.key = key self.value = value @@ -5264,82 +4400,57 @@ class PartitionEvent(FabricEvent): """Represents the base for all Partition Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosPartitionPrimaryMoveScheduledEvent, ChaosPartitionSecondaryMoveScheduledEvent, PartitionAnalysisEvent, PartitionHealthReportExpiredEvent, PartitionNewHealthReportEvent, PartitionReconfiguredEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: PartitionAnalysisEvent, PartitionNewHealthReportEvent, + PartitionHealthReportExpiredEvent, PartitionReconfiguredEvent, + ChaosPartitionSecondaryMoveScheduledEvent, + ChaosPartitionPrimaryMoveScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
:type partition_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent'} - } - - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + 'kind': {'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionNewHealthReport': 'PartitionNewHealthReportEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionReconfigured': 'PartitionReconfiguredEvent', 'ChaosPartitionSecondaryMoveScheduled': 'ChaosPartitionSecondaryMoveScheduledEvent', 'ChaosPartitionPrimaryMoveScheduled': 'ChaosPartitionPrimaryMoveScheduledEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(PartitionEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, 
**kwargs) - self.kind = 'PartitionEvent' # type: str self.partition_id = partition_id + self.kind = 'PartitionEvent' class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): @@ -5347,42 +4458,23 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - 
"ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -5397,9 +4489,9 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -5409,11 +4501,11 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -5422,28 +4514,14 @@ class ChaosPartitionPrimaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - fault_group_id: str, - fault_id: str, - service_name: str, - node_to: str, - forced_move: bool, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, fault_group_id: str, fault_id: str, service_name: str, node_to: str, forced_move: bool, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosPartitionPrimaryMoveScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) - self.kind = 'ChaosPartitionPrimaryMoveScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = 
fault_id self.service_name = service_name self.node_to = node_to self.forced_move = forced_move + self.kind = 'ChaosPartitionPrimaryMoveScheduled' class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): @@ -5451,42 +4529,23 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", 
"ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param fault_group_id: Required. Id of fault group. 
:type fault_group_id: str @@ -5503,9 +4562,9 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'fault_group_id': {'required': True}, 'fault_id': {'required': True}, @@ -5516,11 +4575,11 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, 'fault_id': {'key': 'FaultId', 'type': 'str'}, @@ -5530,122 +4589,84 @@ class ChaosPartitionSecondaryMoveScheduledEvent(PartitionEvent): 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - fault_group_id: str, - fault_id: str, - service_name: str, - source_node: str, - destination_node: str, - forced_move: bool, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, fault_group_id: str, fault_id: str, service_name: str, source_node: str, destination_node: str, forced_move: bool, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosPartitionSecondaryMoveScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) - self.kind = 'ChaosPartitionSecondaryMoveScheduled' # 
type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.service_name = service_name self.source_node = source_node self.destination_node = destination_node self.forced_move = forced_move + self.kind = 'ChaosPartitionSecondaryMoveScheduled' class ReplicaEvent(FabricEvent): """Represents the base for all Replica Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent, StatefulReplicaHealthReportExpiredEvent, StatefulReplicaNewHealthReportEvent, StatelessReplicaHealthReportExpiredEvent, StatelessReplicaNewHealthReportEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - 
"StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: StatefulReplicaNewHealthReportEvent, + StatefulReplicaHealthReportExpiredEvent, + StatelessReplicaNewHealthReportEvent, + StatelessReplicaHealthReportExpiredEvent, + ChaosReplicaRemovalScheduledEvent, ChaosReplicaRestartScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. 
:type replica_id: long """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, } _subtype_map = { - 'kind': {'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent'} - } - - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - replica_id: int, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + 'kind': {'StatefulReplicaNewHealthReport': 'StatefulReplicaNewHealthReportEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatelessReplicaNewHealthReport': 'StatelessReplicaNewHealthReportEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'ChaosReplicaRemovalScheduled': 'ChaosReplicaRemovalScheduledEvent', 'ChaosReplicaRestartScheduled': 'ChaosReplicaRestartScheduledEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, category: str=None, 
has_correlated_events: bool=None, **kwargs) -> None: super(ReplicaEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ReplicaEvent' # type: str self.partition_id = partition_id self.replica_id = replica_id + self.kind = 'ReplicaEvent' class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): @@ -5653,48 +4674,31 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. 
ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -5705,9 +4709,9 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -5716,11 +4720,11 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -5728,25 +4732,12 @@ class ChaosReplicaRemovalScheduledEvent(ReplicaEvent): 'service_uri': 
{'key': 'ServiceUri', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - replica_id: int, - fault_group_id: str, - fault_id: str, - service_uri: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosReplicaRemovalScheduledEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) - self.kind = 'ChaosReplicaRemovalScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.service_uri = service_uri + self.kind = 'ChaosReplicaRemovalScheduled' class ChaosReplicaRestartScheduledEvent(ReplicaEvent): @@ -5754,48 +4745,31 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param fault_group_id: Required. Id of fault group. :type fault_group_id: str @@ -5806,9 +4780,9 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'fault_group_id': {'required': True}, @@ -5817,11 +4791,11 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, @@ -5829,39 +4803,29 @@ class ChaosReplicaRestartScheduledEvent(ReplicaEvent): 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - replica_id: int, - fault_group_id: str, - fault_id: str, - service_uri: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosReplicaRestartScheduledEvent, self).__init__(event_instance_id=event_instance_id, 
category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) - self.kind = 'ChaosReplicaRestartScheduled' # type: str self.fault_group_id = fault_group_id self.fault_id = fault_id self.service_uri = service_uri + self.kind = 'ChaosReplicaRestartScheduled' -class ChaosSchedule(msrest.serialization.Model): +class ChaosSchedule(Model): """Defines the schedule used by Chaos. :param start_date: The date and time Chaos will start using this schedule. - :type start_date: ~datetime.datetime - :param expiry_date: The date and time Chaos will continue to use this schedule until. - :type expiry_date: ~datetime.datetime - :param chaos_parameters_dictionary: A mapping of string names to Chaos Parameters to be - referenced by Chaos Schedule Jobs. + Default value: "1601-01-01T00:00:00Z" . + :type start_date: datetime + :param expiry_date: The date and time Chaos will continue to use this + schedule until. Default value: "9999-12-31T23:59:59.999Z" . + :type expiry_date: datetime + :param chaos_parameters_dictionary: A mapping of string names to Chaos + Parameters to be referenced by Chaos Schedule Jobs. :type chaos_parameters_dictionary: list[~azure.servicefabric.models.ChaosParametersDictionaryItem] - :param jobs: A list of all Chaos Schedule Jobs that will be automated by the schedule. + :param jobs: A list of all Chaos Schedule Jobs that will be automated by + the schedule. 
:type jobs: list[~azure.servicefabric.models.ChaosScheduleJob] """ @@ -5872,15 +4836,7 @@ class ChaosSchedule(msrest.serialization.Model): 'jobs': {'key': 'Jobs', 'type': '[ChaosScheduleJob]'}, } - def __init__( - self, - *, - start_date: Optional[datetime.datetime] = "1601-01-01T00:00:00Z", - expiry_date: Optional[datetime.datetime] = "9999-12-31T23:59:59.999Z", - chaos_parameters_dictionary: Optional[List["ChaosParametersDictionaryItem"]] = None, - jobs: Optional[List["ChaosScheduleJob"]] = None, - **kwargs - ): + def __init__(self, *, start_date="1601-01-01T00:00:00Z", expiry_date="9999-12-31T23:59:59.999Z", chaos_parameters_dictionary=None, jobs=None, **kwargs) -> None: super(ChaosSchedule, self).__init__(**kwargs) self.start_date = start_date self.expiry_date = expiry_date @@ -5888,8 +4844,9 @@ def __init__( self.jobs = jobs -class ChaosScheduleDescription(msrest.serialization.Model): - """Defines the Chaos Schedule used by Chaos and the version of the Chaos Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. +class ChaosScheduleDescription(Model): + """Defines the Chaos Schedule used by Chaos and the version of the Chaos + Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. :param version: The version number of the Schedule. :type version: int @@ -5906,27 +4863,24 @@ class ChaosScheduleDescription(msrest.serialization.Model): 'schedule': {'key': 'Schedule', 'type': 'ChaosSchedule'}, } - def __init__( - self, - *, - version: Optional[int] = None, - schedule: Optional["ChaosSchedule"] = None, - **kwargs - ): + def __init__(self, *, version: int=None, schedule=None, **kwargs) -> None: super(ChaosScheduleDescription, self).__init__(**kwargs) self.version = version self.schedule = schedule -class ChaosScheduleJob(msrest.serialization.Model): - """Defines a repetition rule and parameters of Chaos to be used with the Chaos Schedule. 
+class ChaosScheduleJob(Model): + """Defines a repetition rule and parameters of Chaos to be used with the Chaos + Schedule. - :param chaos_parameters: A reference to which Chaos Parameters of the Chaos Schedule to use. + :param chaos_parameters: A reference to which Chaos Parameters of the + Chaos Schedule to use. :type chaos_parameters: str - :param days: Defines the days of the week that a Chaos Schedule Job will run for. + :param days: Defines the days of the week that a Chaos Schedule Job will + run for. :type days: ~azure.servicefabric.models.ChaosScheduleJobActiveDaysOfWeek - :param times: A list of Time Ranges that specify when during active days that this job will - run. The times are interpreted as UTC. + :param times: A list of Time Ranges that specify when during active days + that this job will run. The times are interpreted as UTC. :type times: list[~azure.servicefabric.models.TimeRange] """ @@ -5936,36 +4890,36 @@ class ChaosScheduleJob(msrest.serialization.Model): 'times': {'key': 'Times', 'type': '[TimeRange]'}, } - def __init__( - self, - *, - chaos_parameters: Optional[str] = None, - days: Optional["ChaosScheduleJobActiveDaysOfWeek"] = None, - times: Optional[List["TimeRange"]] = None, - **kwargs - ): + def __init__(self, *, chaos_parameters: str=None, days=None, times=None, **kwargs) -> None: super(ChaosScheduleJob, self).__init__(**kwargs) self.chaos_parameters = chaos_parameters self.days = days self.times = times -class ChaosScheduleJobActiveDaysOfWeek(msrest.serialization.Model): +class ChaosScheduleJobActiveDaysOfWeek(Model): """Defines the days of the week that a Chaos Schedule Job will run for. :param sunday: Indicates if the Chaos Schedule Job will run on Sunday. + Default value: False . :type sunday: bool :param monday: Indicates if the Chaos Schedule Job will run on Monday. + Default value: False . :type monday: bool :param tuesday: Indicates if the Chaos Schedule Job will run on Tuesday. + Default value: False . 
:type tuesday: bool - :param wednesday: Indicates if the Chaos Schedule Job will run on Wednesday. + :param wednesday: Indicates if the Chaos Schedule Job will run on + Wednesday. Default value: False . :type wednesday: bool :param thursday: Indicates if the Chaos Schedule Job will run on Thursday. + Default value: False . :type thursday: bool :param friday: Indicates if the Chaos Schedule Job will run on Friday. + Default value: False . :type friday: bool :param saturday: Indicates if the Chaos Schedule Job will run on Saturday. + Default value: False . :type saturday: bool """ @@ -5979,18 +4933,7 @@ class ChaosScheduleJobActiveDaysOfWeek(msrest.serialization.Model): 'saturday': {'key': 'Saturday', 'type': 'bool'}, } - def __init__( - self, - *, - sunday: Optional[bool] = False, - monday: Optional[bool] = False, - tuesday: Optional[bool] = False, - wednesday: Optional[bool] = False, - thursday: Optional[bool] = False, - friday: Optional[bool] = False, - saturday: Optional[bool] = False, - **kwargs - ): + def __init__(self, *, sunday: bool=False, monday: bool=False, tuesday: bool=False, wednesday: bool=False, thursday: bool=False, friday: bool=False, saturday: bool=False, **kwargs) -> None: super(ChaosScheduleJobActiveDaysOfWeek, self).__init__(**kwargs) self.sunday = sunday self.monday = monday @@ -6005,73 +4948,49 @@ class ClusterEvent(FabricEvent): """Represents the base for all Cluster Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ChaosStartedEvent, ChaosStoppedEvent, ClusterHealthReportExpiredEvent, ClusterNewHealthReportEvent, ClusterUpgradeCompletedEvent, ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: ClusterNewHealthReportEvent, + ClusterHealthReportExpiredEvent, ClusterUpgradeCompletedEvent, + ClusterUpgradeDomainCompletedEvent, ClusterUpgradeRollbackCompletedEvent, + ClusterUpgradeRollbackStartedEvent, ClusterUpgradeStartedEvent, + ChaosStoppedEvent, ChaosStartedEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'ChaosStarted': 'ChaosStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent'} - } 
- - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + 'kind': {'ClusterNewHealthReport': 'ClusterNewHealthReportEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterUpgradeCompleted': 'ClusterUpgradeCompletedEvent', 'ClusterUpgradeDomainCompleted': 'ClusterUpgradeDomainCompletedEvent', 'ClusterUpgradeRollbackCompleted': 'ClusterUpgradeRollbackCompletedEvent', 'ClusterUpgradeRollbackStarted': 'ClusterUpgradeRollbackStartedEvent', 'ClusterUpgradeStarted': 'ClusterUpgradeStartedEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ChaosStarted': 'ChaosStartedEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterEvent' # type: str + self.kind = 'ClusterEvent' class ChaosStartedEvent(ClusterEvent): @@ -6079,51 +4998,34 @@ class ChaosStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param max_concurrent_faults: Required. Maximum number of concurrent faults. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_concurrent_faults: Required. Maximum number of concurrent + faults. :type max_concurrent_faults: long :param time_to_run_in_seconds: Required. Time to run in seconds. :type time_to_run_in_seconds: float - :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum timeout for cluster - stabilization in seconds. + :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum + timeout for cluster stabilization in seconds. :type max_cluster_stabilization_timeout_in_seconds: float - :param wait_time_between_iterations_in_seconds: Required. Wait time between iterations in - seconds. + :param wait_time_between_iterations_in_seconds: Required. Wait time + between iterations in seconds. :type wait_time_between_iterations_in_seconds: float - :param wait_time_between_faults_in_seconds: Required. Wait time between faults in seconds. + :param wait_time_between_faults_in_seconds: Required. Wait time between + faults in seconds. :type wait_time_between_faults_in_seconds: float - :param move_replica_fault_enabled: Required. Indicates MoveReplica fault is enabled. + :param move_replica_fault_enabled: Required. Indicates MoveReplica fault + is enabled. :type move_replica_fault_enabled: bool :param included_node_type_list: Required. List of included Node types. 
:type included_node_type_list: str @@ -6136,9 +5038,9 @@ class ChaosStartedEvent(ClusterEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'max_concurrent_faults': {'required': True}, 'time_to_run_in_seconds': {'required': True}, 'max_cluster_stabilization_timeout_in_seconds': {'required': True}, @@ -6152,11 +5054,11 @@ class ChaosStartedEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'float'}, 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'float'}, @@ -6169,27 +5071,8 @@ class ChaosStartedEvent(ClusterEvent): 'chaos_context': {'key': 'ChaosContext', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - max_concurrent_faults: int, - time_to_run_in_seconds: float, - max_cluster_stabilization_timeout_in_seconds: float, - wait_time_between_iterations_in_seconds: float, - wait_time_between_faults_in_seconds: float, - move_replica_fault_enabled: bool, - included_node_type_list: str, - included_application_list: str, - cluster_health_policy: str, - chaos_context: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, max_concurrent_faults: int, time_to_run_in_seconds: float, max_cluster_stabilization_timeout_in_seconds: float, wait_time_between_iterations_in_seconds: float, 
wait_time_between_faults_in_seconds: float, move_replica_fault_enabled: bool, included_node_type_list: str, included_application_list: str, cluster_health_policy: str, chaos_context: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ChaosStarted' # type: str self.max_concurrent_faults = max_concurrent_faults self.time_to_run_in_seconds = time_to_run_in_seconds self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds @@ -6200,6 +5083,7 @@ def __init__( self.included_application_list = included_application_list self.cluster_health_policy = cluster_health_policy self.chaos_context = chaos_context + self.kind = 'ChaosStarted' class ChaosStoppedEvent(ClusterEvent): @@ -6207,106 +5091,96 @@ class ChaosStoppedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param reason: Required. Describes reason. :type reason: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'reason': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - reason: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, reason: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ChaosStoppedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ChaosStopped' # type: str self.reason = reason - - -class ChaosTargetFilter(msrest.serialization.Model): - """Defines all filters for targeted Chaos faults, for example, faulting only certain node types or faulting only certain applications. 
-If ChaosTargetFilter is not used, Chaos faults all cluster entities. If ChaosTargetFilter is used, Chaos faults only the entities that meet the ChaosTargetFilter -specification. NodeTypeInclusionList and ApplicationInclusionList allow a union semantics only. It is not possible to specify an intersection -of NodeTypeInclusionList and ApplicationInclusionList. For example, it is not possible to specify "fault this application only when it is on that node type." -Once an entity is included in either NodeTypeInclusionList or ApplicationInclusionList, that entity cannot be excluded using ChaosTargetFilter. Even if -applicationX does not appear in ApplicationInclusionList, in some Chaos iteration applicationX can be faulted because it happens to be on a node of nodeTypeY that is included -in NodeTypeInclusionList. If both NodeTypeInclusionList and ApplicationInclusionList are null or empty, an ArgumentException is thrown. - - :param node_type_inclusion_list: A list of node types to include in Chaos faults. - All types of faults (restart node, restart code package, remove replica, restart replica, move - primary, and move secondary) are enabled for the nodes of these node types. - If a node type (say NodeTypeX) does not appear in the NodeTypeInclusionList, then node level - faults (like NodeRestart) will never be enabled for the nodes of - NodeTypeX, but code package and replica faults can still be enabled for NodeTypeX if an - application in the ApplicationInclusionList. + self.kind = 'ChaosStopped' + + +class ChaosTargetFilter(Model): + """Defines all filters for targeted Chaos faults, for example, faulting only + certain node types or faulting only certain applications. + If ChaosTargetFilter is not used, Chaos faults all cluster entities. If + ChaosTargetFilter is used, Chaos faults only the entities that meet the + ChaosTargetFilter + specification. NodeTypeInclusionList and ApplicationInclusionList allow a + union semantics only. 
It is not possible to specify an intersection + of NodeTypeInclusionList and ApplicationInclusionList. For example, it is + not possible to specify "fault this application only when it is on that + node type." + Once an entity is included in either NodeTypeInclusionList or + ApplicationInclusionList, that entity cannot be excluded using + ChaosTargetFilter. Even if + applicationX does not appear in ApplicationInclusionList, in some Chaos + iteration applicationX can be faulted because it happens to be on a node of + nodeTypeY that is included + in NodeTypeInclusionList. If both NodeTypeInclusionList and + ApplicationInclusionList are null or empty, an ArgumentException is thrown. + + :param node_type_inclusion_list: A list of node types to include in Chaos + faults. + All types of faults (restart node, restart code package, remove replica, + restart replica, move primary, and move secondary) are enabled for the + nodes of these node types. + If a node type (say NodeTypeX) does not appear in the + NodeTypeInclusionList, then node level faults (like NodeRestart) will + never be enabled for the nodes of + NodeTypeX, but code package and replica faults can still be enabled for + NodeTypeX if an application in the ApplicationInclusionList. happens to reside on a node of NodeTypeX. - At most 100 node type names can be included in this list, to increase this number, a config - upgrade is required for MaxNumberOfNodeTypesInChaosEntityFilter configuration. + At most 100 node type names can be included in this list, to increase this + number, a config upgrade is required for + MaxNumberOfNodeTypesInChaosEntityFilter configuration. :type node_type_inclusion_list: list[str] - :param application_inclusion_list: A list of application URIs to include in Chaos faults. - All replicas belonging to services of these applications are amenable to replica faults - (restart replica, remove replica, move primary, and move secondary) by Chaos. 
- Chaos may restart a code package only if the code package hosts replicas of these applications - only. - If an application does not appear in this list, it can still be faulted in some Chaos - iteration if the application ends up on a node of a node type that is included in - NodeTypeInclusionList. - However, if applicationX is tied to nodeTypeY through placement constraints and applicationX - is absent from ApplicationInclusionList and nodeTypeY is absent from NodeTypeInclusionList, - then applicationX will never be faulted. - At most 1000 application names can be included in this list, to increase this number, a config - upgrade is required for MaxNumberOfApplicationsInChaosEntityFilter configuration. + :param application_inclusion_list: A list of application URIs to include + in Chaos faults. + All replicas belonging to services of these applications are amenable to + replica faults (restart replica, remove replica, move primary, and move + secondary) by Chaos. + Chaos may restart a code package only if the code package hosts replicas + of these applications only. + If an application does not appear in this list, it can still be faulted in + some Chaos iteration if the application ends up on a node of a node type + that is included in NodeTypeInclusionList. + However, if applicationX is tied to nodeTypeY through placement + constraints and applicationX is absent from ApplicationInclusionList and + nodeTypeY is absent from NodeTypeInclusionList, then applicationX will + never be faulted. + At most 1000 application names can be included in this list, to increase + this number, a config upgrade is required for + MaxNumberOfApplicationsInChaosEntityFilter configuration. 
:type application_inclusion_list: list[str] """ @@ -6315,194 +5189,170 @@ class ChaosTargetFilter(msrest.serialization.Model): 'application_inclusion_list': {'key': 'ApplicationInclusionList', 'type': '[str]'}, } - def __init__( - self, - *, - node_type_inclusion_list: Optional[List[str]] = None, - application_inclusion_list: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, node_type_inclusion_list=None, application_inclusion_list=None, **kwargs) -> None: super(ChaosTargetFilter, self).__init__(**kwargs) self.node_type_inclusion_list = node_type_inclusion_list self.application_inclusion_list = application_inclusion_list -class PropertyBatchOperation(msrest.serialization.Model): - """Represents the base type for property operations that can be put into a batch and submitted. +class PropertyBatchOperation(Model): + """Represents the base type for property operations that can be put into a + batch and submitted. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CheckExistsPropertyBatchOperation, CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, DeletePropertyBatchOperation, GetPropertyBatchOperation, PutPropertyBatchOperation. + sub-classes are: CheckExistsPropertyBatchOperation, + CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, + DeletePropertyBatchOperation, GetPropertyBatchOperation, + PutPropertyBatchOperation All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. 
Constant filled by server. + :type kind: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'} } - def __init__( - self, - *, - property_name: str, - **kwargs - ): + def __init__(self, *, property_name: str, **kwargs) -> None: super(PropertyBatchOperation, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.property_name = property_name + self.kind = None class CheckExistsPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the Boolean existence of a property with the Exists argument. -The PropertyBatchOperation operation fails if the property's existence is not equal to the Exists argument. -The CheckExistsPropertyBatchOperation is generally used as a precondition for the write operations in the batch. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the Boolean existence of + a property with the Exists argument. + The PropertyBatchOperation operation fails if the property's existence is + not equal to the Exists argument. + The CheckExistsPropertyBatchOperation is generally used as a precondition + for the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param exists: Required. Whether or not the property should exist for the operation to pass. + :param kind: Required. Constant filled by server. + :type kind: str + :param exists: Required. Whether or not the property should exist for the + operation to pass. :type exists: bool """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'exists': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'exists': {'key': 'Exists', 'type': 'bool'}, } - def __init__( - self, - *, - property_name: str, - exists: bool, - **kwargs - ): + def __init__(self, *, property_name: str, exists: bool, **kwargs) -> None: super(CheckExistsPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'CheckExists' # type: str self.exists = exists + self.kind = 'CheckExists' class CheckSequencePropertyBatchOperation(PropertyBatchOperation): - """Compares the Sequence Number of a property with the SequenceNumber argument. -A property's sequence number can be thought of as that property's version. -Every time the property is modified, its sequence number is increased. -The sequence number can be found in a property's metadata. -The comparison fails if the sequence numbers are not equal. 
-CheckSequencePropertyBatchOperation is generally used as a precondition for the write operations in the batch. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Compares the Sequence Number of a property with the SequenceNumber + argument. + A property's sequence number can be thought of as that property's version. + Every time the property is modified, its sequence number is increased. + The sequence number can be found in a property's metadata. + The comparison fails if the sequence numbers are not equal. + CheckSequencePropertyBatchOperation is generally used as a precondition for + the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str :param sequence_number: Required. The expected sequence number. 
:type sequence_number: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'sequence_number': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__( - self, - *, - property_name: str, - sequence_number: str, - **kwargs - ): + def __init__(self, *, property_name: str, sequence_number: str, **kwargs) -> None: super(CheckSequencePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'CheckSequence' # type: str self.sequence_number = sequence_number + self.kind = 'CheckSequence' class CheckValuePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that compares the value of the property with the expected value. -The CheckValuePropertyBatchOperation is generally used as a precondition for the write operations in the batch. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that compares the value of the property + with the expected value. + The CheckValuePropertyBatchOperation is generally used as a precondition + for the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". 
- :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str :param value: Required. The expected property value. :type value: ~azure.servicefabric.models.PropertyValue """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__( - self, - *, - property_name: str, - value: "PropertyValue", - **kwargs - ): + def __init__(self, *, property_name: str, value, **kwargs) -> None: super(CheckValuePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'CheckValue' # type: str self.value = value + self.kind = 'CheckValue' -class ClusterConfiguration(msrest.serialization.Model): +class ClusterConfiguration(Model): """Information about the standalone cluster configuration. - :param cluster_configuration: The contents of the cluster configuration file. + :param cluster_configuration: The contents of the cluster configuration + file. 
:type cluster_configuration: str """ @@ -6510,56 +5360,62 @@ class ClusterConfiguration(msrest.serialization.Model): 'cluster_configuration': {'key': 'ClusterConfiguration', 'type': 'str'}, } - def __init__( - self, - *, - cluster_configuration: Optional[str] = None, - **kwargs - ): + def __init__(self, *, cluster_configuration: str=None, **kwargs) -> None: super(ClusterConfiguration, self).__init__(**kwargs) self.cluster_configuration = cluster_configuration -class ClusterConfigurationUpgradeDescription(msrest.serialization.Model): +class ClusterConfigurationUpgradeDescription(Model): """Describes the parameters for a standalone cluster configuration upgrade. All required parameters must be populated in order to send to Azure. - :param cluster_config: Required. The cluster configuration as a JSON string. For example, `this - file - `_ - contains JSON describing the `nodes and other properties of the cluster - `_. + :param cluster_config: Required. The cluster configuration as a JSON + string. For example, [this + file](https://github.com/Azure-Samples/service-fabric-dotnet-standalone-cluster-configuration/blob/master/Samples/ClusterConfig.Unsecure.DevCluster.json) + contains JSON describing the [nodes and other properties of the + cluster](https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-manifest). :type cluster_config: str - :param health_check_retry_timeout: The length of time between attempts to perform health checks - if the application or cluster is not healthy. - :type health_check_retry_timeout: ~datetime.timedelta - :param health_check_wait_duration_in_seconds: The length of time to wait after completing an - upgrade domain before starting the health checks process. - :type health_check_wait_duration_in_seconds: ~datetime.timedelta - :param health_check_stable_duration_in_seconds: The length of time that the application or - cluster must remain healthy before the upgrade proceeds to the next upgrade domain. 
- :type health_check_stable_duration_in_seconds: ~datetime.timedelta - :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade domain. - :type upgrade_domain_timeout_in_seconds: ~datetime.timedelta - :param upgrade_timeout_in_seconds: The upgrade timeout. - :type upgrade_timeout_in_seconds: ~datetime.timedelta - :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy - applications during the upgrade. Allowed values are integer values from zero to 100. + :param health_check_retry_timeout: The length of time between attempts to + perform health checks if the application or cluster is not healthy. + Default value: "PT0H0M0S" . + :type health_check_retry_timeout: timedelta + :param health_check_wait_duration_in_seconds: The length of time to wait + after completing an upgrade domain before starting the health checks + process. Default value: "PT0H0M0S" . + :type health_check_wait_duration_in_seconds: timedelta + :param health_check_stable_duration_in_seconds: The length of time that + the application or cluster must remain healthy before the upgrade proceeds + to the next upgrade domain. Default value: "PT0H0M0S" . + :type health_check_stable_duration_in_seconds: timedelta + :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade + domain. Default value: "PT0H0M0S" . + :type upgrade_domain_timeout_in_seconds: timedelta + :param upgrade_timeout_in_seconds: The upgrade timeout. Default value: + "PT0H0M0S" . + :type upgrade_timeout_in_seconds: timedelta + :param max_percent_unhealthy_applications: The maximum allowed percentage + of unhealthy applications during the upgrade. Allowed values are integer + values from zero to 100. Default value: 0 . :type max_percent_unhealthy_applications: int - :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes during - the upgrade. Allowed values are integer values from zero to 100. 
+ :param max_percent_unhealthy_nodes: The maximum allowed percentage of + unhealthy nodes during the upgrade. Allowed values are integer values from + zero to 100. Default value: 0 . :type max_percent_unhealthy_nodes: int - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of delta health - degradation during the upgrade. Allowed values are integer values from zero to 100. + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage + of delta health degradation during the upgrade. Allowed values are integer + values from zero to 100. Default value: 0 . :type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of - upgrade domain delta health degradation during the upgrade. Allowed values are integer values - from zero to 100. + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum + allowed percentage of upgrade domain delta health degradation during the + upgrade. Allowed values are integer values from zero to 100. Default + value: 0 . :type max_percent_upgrade_domain_delta_unhealthy_nodes: int - :param application_health_policies: Defines the application health policy map used to evaluate - the health of an application or one of its children entities. - :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policies: Defines the application health policy + map used to evaluate the health of an application or one of its children + entities. 
+ :type application_health_policies: + ~azure.servicefabric.models.ApplicationHealthPolicies """ _validation = { @@ -6580,22 +5436,7 @@ class ClusterConfigurationUpgradeDescription(msrest.serialization.Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__( - self, - *, - cluster_config: str, - health_check_retry_timeout: Optional[datetime.timedelta] = "PT0H0M0S", - health_check_wait_duration_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", - health_check_stable_duration_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", - upgrade_domain_timeout_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", - upgrade_timeout_in_seconds: Optional[datetime.timedelta] = "PT0H0M0S", - max_percent_unhealthy_applications: Optional[int] = 0, - max_percent_unhealthy_nodes: Optional[int] = 0, - max_percent_delta_unhealthy_nodes: Optional[int] = 0, - max_percent_upgrade_domain_delta_unhealthy_nodes: Optional[int] = 0, - application_health_policies: Optional["ApplicationHealthPolicies"] = None, - **kwargs - ): + def __init__(self, *, cluster_config: str, health_check_retry_timeout="PT0H0M0S", health_check_wait_duration_in_seconds="PT0H0M0S", health_check_stable_duration_in_seconds="PT0H0M0S", upgrade_domain_timeout_in_seconds="PT0H0M0S", upgrade_timeout_in_seconds="PT0H0M0S", max_percent_unhealthy_applications: int=0, max_percent_unhealthy_nodes: int=0, max_percent_delta_unhealthy_nodes: int=0, max_percent_upgrade_domain_delta_unhealthy_nodes: int=0, application_health_policies=None, **kwargs) -> None: super(ClusterConfigurationUpgradeDescription, self).__init__(**kwargs) self.cluster_config = cluster_config self.health_check_retry_timeout = health_check_retry_timeout @@ -6610,12 +5451,13 @@ def __init__( self.application_health_policies = application_health_policies -class ClusterConfigurationUpgradeStatusInfo(msrest.serialization.Model): +class ClusterConfigurationUpgradeStatusInfo(Model): 
"""Information about a standalone cluster configuration upgrade status. - :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", - "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", - "RollingForwardInProgress", "RollingForwardCompleted", "Failed". + :param upgrade_state: The state of the upgrade domain. Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState :param progress_status: The cluster manifest version. :type progress_status: int @@ -6632,15 +5474,7 @@ class ClusterConfigurationUpgradeStatusInfo(msrest.serialization.Model): 'details': {'key': 'Details', 'type': 'str'}, } - def __init__( - self, - *, - upgrade_state: Optional[Union[str, "UpgradeState"]] = None, - progress_status: Optional[int] = None, - config_version: Optional[str] = None, - details: Optional[str] = None, - **kwargs - ): + def __init__(self, *, upgrade_state=None, progress_status: int=None, config_version: str=None, details: str=None, **kwargs) -> None: super(ClusterConfigurationUpgradeStatusInfo, self).__init__(**kwargs) self.upgrade_state = upgrade_state self.progress_status = progress_status @@ -6650,28 +5484,35 @@ def __init__( class ClusterHealth(EntityHealth): """Represents the health of the cluster. -Contains the cluster aggregated health state, the cluster application and node health states as well as the health events and the unhealthy evaluations. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + Contains the cluster aggregated health state, the cluster application and + node health states as well as the health events and the unhealthy + evaluations. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param node_health_states: Cluster node health states as found in the health store. - :type node_health_states: list[~azure.servicefabric.models.NodeHealthState] - :param application_health_states: Cluster application health states as found in the health - store. 
- :type application_health_states: list[~azure.servicefabric.models.ApplicationHealthState] + :param node_health_states: Cluster node health states as found in the + health store. + :type node_health_states: + list[~azure.servicefabric.models.NodeHealthState] + :param application_health_states: Cluster application health states as + found in the health store. + :type application_health_states: + list[~azure.servicefabric.models.ApplicationHealthState] """ _attribute_map = { @@ -6683,38 +5524,33 @@ class ClusterHealth(EntityHealth): 'application_health_states': {'key': 'ApplicationHealthStates', 'type': '[ApplicationHealthState]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - node_health_states: Optional[List["NodeHealthState"]] = None, - application_health_states: Optional[List["ApplicationHealthState"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, node_health_states=None, application_health_states=None, **kwargs) -> None: super(ClusterHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.node_health_states = node_health_states self.application_health_states = application_health_states -class ClusterHealthChunk(msrest.serialization.Model): +class ClusterHealthChunk(Model): """Represents the health chunk of the cluster. -Contains the cluster aggregated health state, and the cluster entities that respect the input filter. - - :param health_state: The HealthState representing the aggregated health state of the cluster - computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired cluster health policy and the application - health policies. Possible values include: "Invalid", "Ok", "Warning", "Error", "Unknown". + Contains the cluster aggregated health state, and the cluster entities that + respect the input filter. + + :param health_state: The HealthState representing the aggregated health + state of the cluster computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired cluster health policy and + the application health policies. Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param node_health_state_chunks: The list of node health state chunks in the cluster that - respect the filters in the cluster health chunk query description. - :type node_health_state_chunks: ~azure.servicefabric.models.NodeHealthStateChunkList - :param application_health_state_chunks: The list of application health state chunks in the - cluster that respect the filters in the cluster health chunk query description. + :param node_health_state_chunks: The list of node health state chunks in + the cluster that respect the filters in the cluster health chunk query + description. + :type node_health_state_chunks: + ~azure.servicefabric.models.NodeHealthStateChunkList + :param application_health_state_chunks: The list of application health + state chunks in the cluster that respect the filters in the cluster health + chunk query description. 
:type application_health_state_chunks: ~azure.servicefabric.models.ApplicationHealthStateChunkList """ @@ -6725,45 +5561,49 @@ class ClusterHealthChunk(msrest.serialization.Model): 'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - node_health_state_chunks: Optional["NodeHealthStateChunkList"] = None, - application_health_state_chunks: Optional["ApplicationHealthStateChunkList"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, node_health_state_chunks=None, application_health_state_chunks=None, **kwargs) -> None: super(ClusterHealthChunk, self).__init__(**kwargs) self.health_state = health_state self.node_health_state_chunks = node_health_state_chunks self.application_health_state_chunks = application_health_state_chunks -class ClusterHealthChunkQueryDescription(msrest.serialization.Model): - """The cluster health chunk query description, which can specify the health policies to evaluate cluster health and very expressive filters to select which cluster entities to include in response. +class ClusterHealthChunkQueryDescription(Model): + """The cluster health chunk query description, which can specify the health + policies to evaluate cluster health and very expressive filters to select + which cluster entities to include in response. - :param node_filters: Defines a list of filters that specify which nodes to be included in the - returned cluster health chunk. - If no filters are specified, no nodes are returned. All the nodes are used to evaluate the - cluster's aggregated health state, regardless of the input filters. + :param node_filters: Defines a list of filters that specify which nodes to + be included in the returned cluster health chunk. + If no filters are specified, no nodes are returned. 
All the nodes are used + to evaluate the cluster's aggregated health state, regardless of the input + filters. The cluster health chunk query may specify multiple node filters. - For example, it can specify a filter to return all nodes with health state Error and another - filter to always include a node identified by its NodeName. - :type node_filters: list[~azure.servicefabric.models.NodeHealthStateFilter] - :param application_filters: Defines a list of filters that specify which applications to be - included in the returned cluster health chunk. - If no filters are specified, no applications are returned. All the applications are used to - evaluate the cluster's aggregated health state, regardless of the input filters. + For example, it can specify a filter to return all nodes with health state + Error and another filter to always include a node identified by its + NodeName. + :type node_filters: + list[~azure.servicefabric.models.NodeHealthStateFilter] + :param application_filters: Defines a list of filters that specify which + applications to be included in the returned cluster health chunk. + If no filters are specified, no applications are returned. All the + applications are used to evaluate the cluster's aggregated health state, + regardless of the input filters. The cluster health chunk query may specify multiple application filters. - For example, it can specify a filter to return all applications with health state Error and - another filter to always include applications of a specified application type. - :type application_filters: list[~azure.servicefabric.models.ApplicationHealthStateFilter] - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param application_health_policies: Defines the application health policy map used to evaluate - the health of an application or one of its children entities. 
- :type application_health_policies: ~azure.servicefabric.models.ApplicationHealthPolicies + For example, it can specify a filter to return all applications with + health state Error and another filter to always include applications of a + specified application type. + :type application_filters: + list[~azure.servicefabric.models.ApplicationHealthStateFilter] + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param application_health_policies: Defines the application health policy + map used to evaluate the health of an application or one of its children + entities. + :type application_health_policies: + ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -6773,15 +5613,7 @@ class ClusterHealthChunkQueryDescription(msrest.serialization.Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__( - self, - *, - node_filters: Optional[List["NodeHealthStateFilter"]] = None, - application_filters: Optional[List["ApplicationHealthStateFilter"]] = None, - cluster_health_policy: Optional["ClusterHealthPolicy"] = None, - application_health_policies: Optional["ApplicationHealthPolicies"] = None, - **kwargs - ): + def __init__(self, *, node_filters=None, application_filters=None, cluster_health_policy=None, application_health_policies=None, **kwargs) -> None: super(ClusterHealthChunkQueryDescription, self).__init__(**kwargs) self.node_filters = node_filters self.application_filters = application_filters @@ -6789,22 +5621,24 @@ def __init__( self.application_health_policies = application_health_policies -class ClusterHealthPolicies(msrest.serialization.Model): +class ClusterHealthPolicies(Model): """Health policies to evaluate cluster health. 
- :param application_health_policy_map: Defines a map that contains specific application health - policies for different applications. - Each entry specifies as key the application name and as value an ApplicationHealthPolicy used - to evaluate the application health. - If an application is not specified in the map, the application health evaluation uses the - ApplicationHealthPolicy found in its application manifest or the default application health - policy (if no health policy is defined in the manifest). + :param application_health_policy_map: Defines a map that contains specific + application health policies for different applications. + Each entry specifies as key the application name and as value an + ApplicationHealthPolicy used to evaluate the application health. + If an application is not specified in the map, the application health + evaluation uses the ApplicationHealthPolicy found in its application + manifest or the default application health policy (if no health policy is + defined in the manifest). The map is empty by default. :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. 
+ :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy """ _attribute_map = { @@ -6812,107 +5646,108 @@ class ClusterHealthPolicies(msrest.serialization.Model): 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, } - def __init__( - self, - *, - application_health_policy_map: Optional[List["ApplicationHealthPolicyMapItem"]] = None, - cluster_health_policy: Optional["ClusterHealthPolicy"] = None, - **kwargs - ): + def __init__(self, *, application_health_policy_map=None, cluster_health_policy=None, **kwargs) -> None: super(ClusterHealthPolicies, self).__init__(**kwargs) self.application_health_policy_map = application_health_policy_map self.cluster_health_policy = cluster_health_policy -class ClusterHealthPolicy(msrest.serialization.Model): - """Defines a health policy used to evaluate the health of the cluster or of a cluster node. +class ClusterHealthPolicy(Model): + """Defines a health policy used to evaluate the health of the cluster or of a + cluster node. - :param consider_warning_as_error: Indicates whether warnings are treated with the same severity - as errors. + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. Default value: False . :type consider_warning_as_error: bool - :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes before - reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10. - - The percentage represents the maximum tolerated percentage of nodes that can be unhealthy - before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy node, the health is - evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy nodes over the total number - of nodes in the cluster. - The computation rounds up to tolerate one failure on small numbers of nodes. Default - percentage is zero. 
- - In large clusters, some nodes will always be down or out for repairs, so this percentage - should be configured to tolerate that. + :param max_percent_unhealthy_nodes: The maximum allowed percentage of + unhealthy nodes before reporting an error. For example, to allow 10% of + nodes to be unhealthy, this value would be 10. + The percentage represents the maximum tolerated percentage of nodes that + can be unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy node, + the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes + over the total number of nodes in the cluster. + The computation rounds up to tolerate one failure on small numbers of + nodes. Default percentage is zero. + In large clusters, some nodes will always be down or out for repairs, so + this percentage should be configured to tolerate that. Default value: 0 . :type max_percent_unhealthy_nodes: int - :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy - applications before reporting an error. For example, to allow 10% of applications to be - unhealthy, this value would be 10. - - The percentage represents the maximum tolerated percentage of applications that can be - unhealthy before the cluster is considered in error. - If the percentage is respected but there is at least one unhealthy application, the health is - evaluated as Warning. - This is calculated by dividing the number of unhealthy applications over the total number of - application instances in the cluster, excluding applications of application types that are - included in the ApplicationTypeHealthPolicyMap. - The computation rounds up to tolerate one failure on small numbers of applications. Default - percentage is zero. + :param max_percent_unhealthy_applications: The maximum allowed percentage + of unhealthy applications before reporting an error. 
For example, to allow + 10% of applications to be unhealthy, this value would be 10. + The percentage represents the maximum tolerated percentage of applications + that can be unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy + application, the health is evaluated as Warning. + This is calculated by dividing the number of unhealthy applications over + the total number of application instances in the cluster, excluding + applications of application types that are included in the + ApplicationTypeHealthPolicyMap. + The computation rounds up to tolerate one failure on small numbers of + applications. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_applications: int - :param application_type_health_policy_map: Defines a map with max percentage unhealthy - applications for specific application types. - Each entry specifies as key the application type name and as value an integer that represents - the MaxPercentUnhealthyApplications percentage used to evaluate the applications of the - specified application type. - - The application type health policy map can be used during cluster health evaluation to - describe special application types. - The application types included in the map are evaluated against the percentage specified in - the map, and not with the global MaxPercentUnhealthyApplications defined in the cluster health - policy. - The applications of application types specified in the map are not counted against the global - pool of applications. - For example, if some applications of a type are critical, the cluster administrator can add an - entry to the map for that application type + :param application_type_health_policy_map: Defines a map with max + percentage unhealthy applications for specific application types. 
+ Each entry specifies as key the application type name and as value an + integer that represents the MaxPercentUnhealthyApplications percentage + used to evaluate the applications of the specified application type. + The application type health policy map can be used during cluster health + evaluation to describe special application types. + The application types included in the map are evaluated against the + percentage specified in the map, and not with the global + MaxPercentUnhealthyApplications defined in the cluster health policy. + The applications of application types specified in the map are not counted + against the global pool of applications. + For example, if some applications of a type are critical, the cluster + administrator can add an entry to the map for that application type and assign it a value of 0% (that is, do not tolerate any failures). - All other applications can be evaluated with MaxPercentUnhealthyApplications set to 20% to - tolerate some failures out of the thousands of application instances. - The application type health policy map is used only if the cluster manifest enables - application type health evaluation using the configuration entry for + All other applications can be evaluated with + MaxPercentUnhealthyApplications set to 20% to tolerate some failures out + of the thousands of application instances. + The application type health policy map is used only if the cluster + manifest enables application type health evaluation using the + configuration entry for HealthManager/EnableApplicationTypeHealthEvaluation. :type application_type_health_policy_map: list[~azure.servicefabric.models.ApplicationTypeHealthPolicyMapItem] - :param node_type_health_policy_map: Defines a map with max percentage unhealthy nodes for - specific node types. - Each entry specifies as key the node type name and as value an integer that represents the - MaxPercentUnhealthyNodes percentage used to evaluate the nodes of the specified node type. 
- - The node type health policy map can be used during cluster health evaluation to describe - special node types. - They are evaluated against the percentages associated with their node type name in the map. - Setting this has no impact on the global pool of nodes used for MaxPercentUnhealthyNodes. - The node type health policy map is used only if the cluster manifest enables node type health - evaluation using the configuration entry for HealthManager/EnableNodeTypeHealthEvaluation. - - For example, given a cluster with many nodes of different types, with important work hosted on - node type "SpecialNodeType" that should not tolerate any nodes down. - You can specify global MaxPercentUnhealthyNodes to 20% to tolerate some failures for all - nodes, but for the node type "SpecialNodeType", set the MaxPercentUnhealthyNodes to 0 by - setting the value in the key value pair in NodeTypeHealthPolicyMapItem. The key is the node - type name. - This way, as long as no nodes of type "SpecialNodeType" are in Error state, - even if some of the many nodes in the global pool are in Error state, but below the global - unhealthy percentage, the cluster would be evaluated to Warning. - A Warning health state does not impact cluster upgrade or other monitoring triggered by Error - health state. - But even one node of type SpecialNodeType in Error would make cluster unhealthy (in Error - rather than Warning/Ok), which triggers rollback or pauses the cluster upgrade, depending on - the upgrade configuration. - - Conversely, setting the global MaxPercentUnhealthyNodes to 0, and setting SpecialNodeType's - max percent unhealthy nodes to 100, - with one node of type SpecialNodeType in Error state would still put the cluster in an Error - state, since the global restriction is more strict in this case. + :param node_type_health_policy_map: Defines a map with max percentage + unhealthy nodes for specific node types. 
+ Each entry specifies as key the node type name and as value an integer + that represents the MaxPercentUnhealthyNodes percentage used to evaluate + the nodes of the specified node type. + The node type health policy map can be used during cluster health + evaluation to describe special node types. + They are evaluated against the percentages associated with their node type + name in the map. + Setting this has no impact on the global pool of nodes used for + MaxPercentUnhealthyNodes. + The node type health policy map is used only if the cluster manifest + enables node type health evaluation using the configuration entry for + HealthManager/EnableNodeTypeHealthEvaluation. + For example, given a cluster with many nodes of different types, with + important work hosted on node type "SpecialNodeType" that should not + tolerate any nodes down. + You can specify global MaxPercentUnhealthyNodes to 20% to tolerate some + failures for all nodes, but for the node type "SpecialNodeType", set the + MaxPercentUnhealthyNodes to 0 by + setting the value in the key value pair in NodeTypeHealthPolicyMapItem. + The key is the node type name. + This way, as long as no nodes of type "SpecialNodeType" are in Error + state, + even if some of the many nodes in the global pool are in Error state, but + below the global unhealthy percentage, the cluster would be evaluated to + Warning. + A Warning health state does not impact cluster upgrade or other monitoring + triggered by Error health state. + But even one node of type SpecialNodeType in Error would make cluster + unhealthy (in Error rather than Warning/Ok), which triggers rollback or + pauses the cluster upgrade, depending on the upgrade configuration. 
+ Conversely, setting the global MaxPercentUnhealthyNodes to 0, and setting + SpecialNodeType's max percent unhealthy nodes to 100, + with one node of type SpecialNodeType in Error state would still put the + cluster in an Error state, since the global restriction is more strict in + this case. :type node_type_health_policy_map: list[~azure.servicefabric.models.NodeTypeHealthPolicyMapItem] """ @@ -6925,16 +5760,7 @@ class ClusterHealthPolicy(msrest.serialization.Model): 'node_type_health_policy_map': {'key': 'NodeTypeHealthPolicyMap', 'type': '[NodeTypeHealthPolicyMapItem]'}, } - def __init__( - self, - *, - consider_warning_as_error: Optional[bool] = False, - max_percent_unhealthy_nodes: Optional[int] = 0, - max_percent_unhealthy_applications: Optional[int] = 0, - application_type_health_policy_map: Optional[List["ApplicationTypeHealthPolicyMapItem"]] = None, - node_type_health_policy_map: Optional[List["NodeTypeHealthPolicyMapItem"]] = None, - **kwargs - ): + def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealthy_nodes: int=0, max_percent_unhealthy_applications: int=0, application_type_health_policy_map=None, node_type_health_policy_map=None, **kwargs) -> None: super(ClusterHealthPolicy, self).__init__(**kwargs) self.consider_warning_as_error = consider_warning_as_error self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes @@ -6948,38 +5774,18 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -6992,16 +5798,17 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -7013,11 +5820,11 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -7028,25 +5835,8 @@ class ClusterHealthReportExpiredEvent(ClusterEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterHealthReportExpired' # type: str 
self.source_id = source_id self.property = property self.health_state = health_state @@ -7055,18 +5845,23 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ClusterHealthReportExpired' -class ClusterLoadInfo(msrest.serialization.Model): - """Information about load in a Service Fabric cluster. It holds a summary of all metrics and their load in a cluster. +class ClusterLoadInfo(Model): + """Information about load in a Service Fabric cluster. It holds a summary of + all metrics and their load in a cluster. - :param last_balancing_start_time_utc: The starting time of last resource balancing run. - :type last_balancing_start_time_utc: ~datetime.datetime - :param last_balancing_end_time_utc: The end time of last resource balancing run. - :type last_balancing_end_time_utc: ~datetime.datetime - :param load_metric_information: List that contains metrics and their load information in this - cluster. - :type load_metric_information: list[~azure.servicefabric.models.LoadMetricInformation] + :param last_balancing_start_time_utc: The starting time of last resource + balancing run. + :type last_balancing_start_time_utc: datetime + :param last_balancing_end_time_utc: The end time of last resource + balancing run. + :type last_balancing_end_time_utc: datetime + :param load_metric_information: List that contains metrics and their load + information in this cluster. 
+ :type load_metric_information: + list[~azure.servicefabric.models.LoadMetricInformation] """ _attribute_map = { @@ -7075,21 +5870,14 @@ class ClusterLoadInfo(msrest.serialization.Model): 'load_metric_information': {'key': 'LoadMetricInformation', 'type': '[LoadMetricInformation]'}, } - def __init__( - self, - *, - last_balancing_start_time_utc: Optional[datetime.datetime] = None, - last_balancing_end_time_utc: Optional[datetime.datetime] = None, - load_metric_information: Optional[List["LoadMetricInformation"]] = None, - **kwargs - ): + def __init__(self, *, last_balancing_start_time_utc=None, last_balancing_end_time_utc=None, load_metric_information=None, **kwargs) -> None: super(ClusterLoadInfo, self).__init__(**kwargs) self.last_balancing_start_time_utc = last_balancing_start_time_utc self.last_balancing_end_time_utc = last_balancing_end_time_utc self.load_metric_information = load_metric_information -class ClusterManifest(msrest.serialization.Model): +class ClusterManifest(Model): """Information about the cluster manifest. :param manifest: The contents of the cluster manifest file. @@ -7100,12 +5888,7 @@ class ClusterManifest(msrest.serialization.Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__( - self, - *, - manifest: Optional[str] = None, - **kwargs - ): + def __init__(self, *, manifest: str=None, **kwargs) -> None: super(ClusterManifest, self).__init__(**kwargs) self.manifest = manifest @@ -7115,38 +5898,18 @@ class ClusterNewHealthReportEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param source_id: Required. Id of report source. :type source_id: str :param property: Required. Describes the property. @@ -7159,16 +5922,17 @@ class ClusterNewHealthReportEvent(ClusterEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, 'health_state': {'required': True}, @@ -7180,11 +5944,11 @@ class ClusterNewHealthReportEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, @@ -7195,25 +5959,8 @@ class ClusterNewHealthReportEvent(ClusterEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterNewHealthReport' # type: str self.source_id = 
source_id self.property = property self.health_state = health_state @@ -7222,6 +5969,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ClusterNewHealthReport' class ClusterUpgradeCompletedEvent(ClusterEvent): @@ -7229,122 +5977,102 @@ class ClusterUpgradeCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", 
"ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in - milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - target_cluster_version: str, - overall_upgrade_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterUpgradeCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterUpgradeCompleted' # type: str self.target_cluster_version = target_cluster_version self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ClusterUpgradeCompleted' -class ClusterUpgradeDescriptionObject(msrest.serialization.Model): +class ClusterUpgradeDescriptionObject(Model): """Represents a ServiceFabric cluster upgrade. - :param config_version: The cluster configuration version (specified in the cluster manifest). 
+ :param config_version: The cluster configuration version (specified in the + cluster manifest). :type config_version: str :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. 
When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible - values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", - "ReverseLexicographical". Default value: "Default". + :param sort_order: Defines the order in which an upgrade proceeds through + the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', + 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default + value: "Default" . :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than - absolute health evaluation after completion of each upgrade domain. + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. :type enable_delta_health_evaluation: bool - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. 
- :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of - the cluster during a cluster upgrade. + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Represents the map of application health policies for a - ServiceFabric cluster upgrade. + :param application_health_policy_map: Represents the map of application + health policies for a ServiceFabric cluster upgrade :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicyMapObject """ @@ -7364,23 +6092,7 @@ class ClusterUpgradeDescriptionObject(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicyMapObject'}, } - def __init__( - self, - *, - config_version: Optional[str] = None, - code_version: Optional[str] = None, - upgrade_kind: Optional[Union[str, "UpgradeKind"]] = "Rolling", - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, - force_restart: Optional[bool] = False, - sort_order: Optional[Union[str, "UpgradeSortOrder"]] = "Default", - enable_delta_health_evaluation: Optional[bool] = None, - monitoring_policy: Optional["MonitoringPolicyDescription"] = None, - cluster_health_policy: 
Optional["ClusterHealthPolicy"] = None, - cluster_upgrade_health_policy: Optional["ClusterUpgradeHealthPolicyObject"] = None, - application_health_policy_map: Optional["ApplicationHealthPolicyMapObject"] = None, - **kwargs - ): + def __init__(self, *, config_version: str=None, code_version: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, sort_order="Default", enable_delta_health_evaluation: bool=None, monitoring_policy=None, cluster_health_policy=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None: super(ClusterUpgradeDescriptionObject, self).__init__(**kwargs) self.config_version = config_version self.code_version = code_version @@ -7401,53 +6113,33 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param upgrade_state: Required. State of upgrade. :type upgrade_state: str :param upgrade_domains: Required. Upgrade domains. :type upgrade_domains: str - :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain upgrade in - milli-seconds. + :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain + upgrade in milli-seconds. :type upgrade_domain_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_state': {'required': True}, 'upgrade_domains': {'required': True}, @@ -7455,53 +6147,46 @@ class ClusterUpgradeDomainCompletedEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, 'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'}, 'upgrade_domain_elapsed_time_in_ms': {'key': 
'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - target_cluster_version: str, - upgrade_state: str, - upgrade_domains: str, - upgrade_domain_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, upgrade_state: str, upgrade_domains: str, upgrade_domain_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterUpgradeDomainCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterUpgradeDomainCompleted' # type: str self.target_cluster_version = target_cluster_version self.upgrade_state = upgrade_state self.upgrade_domains = upgrade_domains self.upgrade_domain_elapsed_time_in_ms = upgrade_domain_elapsed_time_in_ms + self.kind = 'ClusterUpgradeDomainCompleted' -class ClusterUpgradeHealthPolicyObject(msrest.serialization.Model): - """Defines a health policy used to evaluate the health of the cluster during a cluster upgrade. +class ClusterUpgradeHealthPolicyObject(Model): + """Defines a health policy used to evaluate the health of the cluster during a + cluster upgrade. - :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage of nodes health - degradation allowed during cluster upgrades. The delta is measured between the state of the - nodes at the beginning of upgrade and the state of the nodes at the time of the health - evaluation. The check is performed after every upgrade domain upgrade completion to make sure - the global state of the cluster is within tolerated limits. The default value is 10%. 
+ :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage + of nodes health degradation allowed during cluster upgrades. The delta is + measured between the state of the nodes at the beginning of upgrade and + the state of the nodes at the time of the health evaluation. The check is + performed after every upgrade domain upgrade completion to make sure the + global state of the cluster is within tolerated limits. The default value + is 10%. :type max_percent_delta_unhealthy_nodes: int - :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum allowed percentage of - upgrade domain nodes health degradation allowed during cluster upgrades. The delta is measured - between the state of the upgrade domain nodes at the beginning of upgrade and the state of the - upgrade domain nodes at the time of the health evaluation. The check is performed after every - upgrade domain upgrade completion for all completed upgrade domains to make sure the state of - the upgrade domains is within tolerated limits. The default value is 15%. + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum + allowed percentage of upgrade domain nodes health degradation allowed + during cluster upgrades. The delta is measured between the state of the + upgrade domain nodes at the beginning of upgrade and the state of the + upgrade domain nodes at the time of the health evaluation. The check is + performed after every upgrade domain upgrade completion for all completed + upgrade domains to make sure the state of the upgrade domains is within + tolerated limits. The default value is 15%. 
:type max_percent_upgrade_domain_delta_unhealthy_nodes: int """ @@ -7515,63 +6200,63 @@ class ClusterUpgradeHealthPolicyObject(msrest.serialization.Model): 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, } - def __init__( - self, - *, - max_percent_delta_unhealthy_nodes: Optional[int] = None, - max_percent_upgrade_domain_delta_unhealthy_nodes: Optional[int] = None, - **kwargs - ): + def __init__(self, *, max_percent_delta_unhealthy_nodes: int=None, max_percent_upgrade_domain_delta_unhealthy_nodes: int=None, **kwargs) -> None: super(ClusterUpgradeHealthPolicyObject, self).__init__(**kwargs) self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes self.max_percent_upgrade_domain_delta_unhealthy_nodes = max_percent_upgrade_domain_delta_unhealthy_nodes -class ClusterUpgradeProgressObject(msrest.serialization.Model): +class ClusterUpgradeProgressObject(Model): """Information about a cluster upgrade. :param code_version: The ServiceFabric code version of the cluster. :type code_version: str - :param config_version: The cluster configuration version (specified in the cluster manifest). + :param config_version: The cluster configuration version (specified in the + cluster manifest). :type config_version: str :param upgrade_domains: List of upgrade domains and their statuses. :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] - :param upgrade_state: The state of the upgrade domain. Possible values include: "Invalid", - "RollingBackInProgress", "RollingBackCompleted", "RollingForwardPending", - "RollingForwardInProgress", "RollingForwardCompleted", "Failed". + :param upgrade_state: The state of the upgrade domain. 
Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState - :param next_upgrade_domain: The name of the next upgrade domain to be processed. + :param next_upgrade_domain: The name of the next upgrade domain to be + processed. :type next_upgrade_domain: str - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_description: Represents a ServiceFabric cluster upgrade. - :type upgrade_description: ~azure.servicefabric.models.ClusterUpgradeDescriptionObject - :param upgrade_duration_in_milliseconds: The estimated elapsed time spent processing the - current overall upgrade. + :param upgrade_description: Represents a ServiceFabric cluster upgrade + :type upgrade_description: + ~azure.servicefabric.models.ClusterUpgradeDescriptionObject + :param upgrade_duration_in_milliseconds: The estimated elapsed time spent + processing the current overall upgrade. :type upgrade_duration_in_milliseconds: str - :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time spent processing the - current upgrade domain. + :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time + spent processing the current upgrade domain. 
:type upgrade_domain_duration_in_milliseconds: str - :param unhealthy_evaluations: List of health evaluations that resulted in the current - aggregated health state. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current in-progress upgrade - domain. + :param unhealthy_evaluations: List of health evaluations that resulted in + the current aggregated health state. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo :param start_timestamp_utc: The start time of the upgrade in UTC. :type start_timestamp_utc: str :param failure_timestamp_utc: The failure time of the upgrade in UTC. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being - executed. Possible values include: "None", "Interrupted", "HealthCheck", - "UpgradeDomainTimeout", "OverallUpgradeTimeout". + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: The detailed upgrade progress for nodes in the - current upgrade domain at the point of failure. + :param upgrade_domain_progress_at_failure: The detailed upgrade progress + for nodes in the current upgrade domain at the point of failure. 
:type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailedUpgradeDomainProgressObject """ @@ -7594,26 +6279,7 @@ class ClusterUpgradeProgressObject(msrest.serialization.Model): 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailedUpgradeDomainProgressObject'}, } - def __init__( - self, - *, - code_version: Optional[str] = None, - config_version: Optional[str] = None, - upgrade_domains: Optional[List["UpgradeDomainInfo"]] = None, - upgrade_state: Optional[Union[str, "UpgradeState"]] = None, - next_upgrade_domain: Optional[str] = None, - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - upgrade_description: Optional["ClusterUpgradeDescriptionObject"] = None, - upgrade_duration_in_milliseconds: Optional[str] = None, - upgrade_domain_duration_in_milliseconds: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - current_upgrade_domain_progress: Optional["CurrentUpgradeDomainProgressInfo"] = None, - start_timestamp_utc: Optional[str] = None, - failure_timestamp_utc: Optional[str] = None, - failure_reason: Optional[Union[str, "FailureReason"]] = None, - upgrade_domain_progress_at_failure: Optional["FailedUpgradeDomainProgressObject"] = None, - **kwargs - ): + def __init__(self, *, code_version: str=None, config_version: str=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain: str=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds: str=None, upgrade_domain_duration_in_milliseconds: str=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, **kwargs) -> None: super(ClusterUpgradeProgressObject, self).__init__(**kwargs) self.code_version = code_version self.config_version = config_version @@ -7637,84 +6303,53 @@ class 
ClusterUpgradeRollbackCompletedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in - milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - target_cluster_version: str, - failure_reason: str, - overall_upgrade_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterUpgradeRollbackCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterUpgradeRollbackCompleted' # type: str self.target_cluster_version = target_cluster_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ClusterUpgradeRollbackCompleted' class ClusterUpgradeRollbackStartedEvent(ClusterEvent): @@ -7722,84 +6357,53 @@ class 
ClusterUpgradeRollbackStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param target_cluster_version: Required. Target Cluster version. :type target_cluster_version: str :param failure_reason: Required. Describes failure. :type failure_reason: str - :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of upgrade in - milli-seconds. + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. 
:type overall_upgrade_elapsed_time_in_ms: float """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'target_cluster_version': {'required': True}, 'failure_reason': {'required': True}, 'overall_upgrade_elapsed_time_in_ms': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - target_cluster_version: str, - failure_reason: str, - overall_upgrade_elapsed_time_in_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterUpgradeRollbackStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterUpgradeRollbackStarted' # type: str self.target_cluster_version = target_cluster_version self.failure_reason = failure_reason self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ClusterUpgradeRollbackStarted' class ClusterUpgradeStartedEvent(ClusterEvent): @@ -7807,38 +6411,18 @@ class 
ClusterUpgradeStartedEvent(ClusterEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param current_cluster_version: Required. Current Cluster version. :type current_cluster_version: str :param target_cluster_version: Required. Target Cluster version. @@ -7852,9 +6436,9 @@ class ClusterUpgradeStartedEvent(ClusterEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'current_cluster_version': {'required': True}, 'target_cluster_version': {'required': True}, 'upgrade_type': {'required': True}, @@ -7863,11 +6447,11 @@ class ClusterUpgradeStartedEvent(ClusterEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'current_cluster_version': {'key': 'CurrentClusterVersion', 'type': 'str'}, 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, 'upgrade_type': {'key': 'UpgradeType', 'type': 'str'}, @@ -7875,30 +6459,17 @@ class ClusterUpgradeStartedEvent(ClusterEvent): 'failure_action': {'key': 'FailureAction', 'type': 'str'}, } - def 
__init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - current_cluster_version: str, - target_cluster_version: str, - upgrade_type: str, - rolling_upgrade_mode: str, - failure_action: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, current_cluster_version: str, target_cluster_version: str, upgrade_type: str, rolling_upgrade_mode: str, failure_action: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ClusterUpgradeStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ClusterUpgradeStarted' # type: str self.current_cluster_version = current_cluster_version self.target_cluster_version = target_cluster_version self.upgrade_type = upgrade_type self.rolling_upgrade_mode = rolling_upgrade_mode self.failure_action = failure_action + self.kind = 'ClusterUpgradeStarted' -class ClusterVersion(msrest.serialization.Model): +class ClusterVersion(Model): """The cluster version. :param version: The Service Fabric cluster runtime version. @@ -7909,39 +6480,38 @@ class ClusterVersion(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - *, - version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, version: str=None, **kwargs) -> None: super(ClusterVersion, self).__init__(**kwargs) self.version = version -class CodePackageEntryPoint(msrest.serialization.Model): - """Information about setup or main entry point of a code package deployed on a Service Fabric node. +class CodePackageEntryPoint(Model): + """Information about setup or main entry point of a code package deployed on a + Service Fabric node. - :param entry_point_location: The location of entry point executable on the node. 
+ :param entry_point_location: The location of entry point executable on the + node. :type entry_point_location: str :param process_id: The process ID of the entry point. :type process_id: str - :param run_as_user_name: The user name under which entry point executable is run on the node. + :param run_as_user_name: The user name under which entry point executable + is run on the node. :type run_as_user_name: str - :param code_package_entry_point_statistics: Statistics about setup or main entry point of a - code package deployed on a Service Fabric node. + :param code_package_entry_point_statistics: Statistics about setup or main + entry point of a code package deployed on a Service Fabric node. :type code_package_entry_point_statistics: ~azure.servicefabric.models.CodePackageEntryPointStatistics - :param status: Specifies the status of the code package entry point deployed on a Service - Fabric node. Possible values include: "Invalid", "Pending", "Starting", "Started", "Stopping", - "Stopped". + :param status: Specifies the status of the code package entry point + deployed on a Service Fabric node. Possible values include: 'Invalid', + 'Pending', 'Starting', 'Started', 'Stopping', 'Stopped' :type status: str or ~azure.servicefabric.models.EntryPointStatus - :param next_activation_time: The time (in UTC) when the entry point executable will be run - next. - :type next_activation_time: ~datetime.datetime - :param instance_id: The instance ID for current running entry point. For a code package setup - entry point (if specified) runs first and after it finishes main entry point is started. Each - time entry point executable is run, its instance id will change. + :param next_activation_time: The time (in UTC) when the entry point + executable will be run next. + :type next_activation_time: datetime + :param instance_id: The instance ID for current running entry point. 
For a + code package setup entry point (if specified) runs first and after it + finishes main entry point is started. Each time entry point executable is + run, its instance id will change. :type instance_id: str """ @@ -7955,18 +6525,7 @@ class CodePackageEntryPoint(msrest.serialization.Model): 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - *, - entry_point_location: Optional[str] = None, - process_id: Optional[str] = None, - run_as_user_name: Optional[str] = None, - code_package_entry_point_statistics: Optional["CodePackageEntryPointStatistics"] = None, - status: Optional[Union[str, "EntryPointStatus"]] = None, - next_activation_time: Optional[datetime.datetime] = None, - instance_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, entry_point_location: str=None, process_id: str=None, run_as_user_name: str=None, code_package_entry_point_statistics=None, status=None, next_activation_time=None, instance_id: str=None, **kwargs) -> None: super(CodePackageEntryPoint, self).__init__(**kwargs) self.entry_point_location = entry_point_location self.process_id = process_id @@ -7977,35 +6536,39 @@ def __init__( self.instance_id = instance_id -class CodePackageEntryPointStatistics(msrest.serialization.Model): - """Statistics about setup or main entry point of a code package deployed on a Service Fabric node. +class CodePackageEntryPointStatistics(Model): + """Statistics about setup or main entry point of a code package deployed on a + Service Fabric node. :param last_exit_code: The last exit code of the entry point. :type last_exit_code: str - :param last_activation_time: The last time (in UTC) when Service Fabric attempted to run the - entry point. - :type last_activation_time: ~datetime.datetime - :param last_exit_time: The last time (in UTC) when the entry point finished running. 
- :type last_exit_time: ~datetime.datetime - :param last_successful_activation_time: The last time (in UTC) when the entry point ran - successfully. - :type last_successful_activation_time: ~datetime.datetime - :param last_successful_exit_time: The last time (in UTC) when the entry point finished running - gracefully. - :type last_successful_exit_time: ~datetime.datetime + :param last_activation_time: The last time (in UTC) when Service Fabric + attempted to run the entry point. + :type last_activation_time: datetime + :param last_exit_time: The last time (in UTC) when the entry point + finished running. + :type last_exit_time: datetime + :param last_successful_activation_time: The last time (in UTC) when the + entry point ran successfully. + :type last_successful_activation_time: datetime + :param last_successful_exit_time: The last time (in UTC) when the entry + point finished running gracefully. + :type last_successful_exit_time: datetime :param activation_count: Number of times the entry point has run. :type activation_count: str - :param activation_failure_count: Number of times the entry point failed to run. + :param activation_failure_count: Number of times the entry point failed to + run. :type activation_failure_count: str - :param continuous_activation_failure_count: Number of times the entry point continuously failed - to run. + :param continuous_activation_failure_count: Number of times the entry + point continuously failed to run. :type continuous_activation_failure_count: str :param exit_count: Number of times the entry point finished running. :type exit_count: str - :param exit_failure_count: Number of times the entry point failed to exit gracefully. + :param exit_failure_count: Number of times the entry point failed to exit + gracefully. :type exit_failure_count: str - :param continuous_exit_failure_count: Number of times the entry point continuously failed to - exit gracefully. 
+ :param continuous_exit_failure_count: Number of times the entry point + continuously failed to exit gracefully. :type continuous_exit_failure_count: str """ @@ -8023,22 +6586,7 @@ class CodePackageEntryPointStatistics(msrest.serialization.Model): 'continuous_exit_failure_count': {'key': 'ContinuousExitFailureCount', 'type': 'str'}, } - def __init__( - self, - *, - last_exit_code: Optional[str] = None, - last_activation_time: Optional[datetime.datetime] = None, - last_exit_time: Optional[datetime.datetime] = None, - last_successful_activation_time: Optional[datetime.datetime] = None, - last_successful_exit_time: Optional[datetime.datetime] = None, - activation_count: Optional[str] = None, - activation_failure_count: Optional[str] = None, - continuous_activation_failure_count: Optional[str] = None, - exit_count: Optional[str] = None, - exit_failure_count: Optional[str] = None, - continuous_exit_failure_count: Optional[str] = None, - **kwargs - ): + def __init__(self, *, last_exit_code: str=None, last_activation_time=None, last_exit_time=None, last_successful_activation_time=None, last_successful_exit_time=None, activation_count: str=None, activation_failure_count: str=None, continuous_activation_failure_count: str=None, exit_count: str=None, exit_failure_count: str=None, continuous_exit_failure_count: str=None, **kwargs) -> None: super(CodePackageEntryPointStatistics, self).__init__(**kwargs) self.last_exit_code = last_exit_code self.last_activation_time = last_activation_time @@ -8053,17 +6601,20 @@ def __init__( self.continuous_exit_failure_count = continuous_exit_failure_count -class ComposeDeploymentStatusInfo(msrest.serialization.Model): +class ComposeDeploymentStatusInfo(Model): """Information about a Service Fabric compose deployment. :param name: The name of the deployment. :type name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. 
+ :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param status: The status of the compose deployment. Possible values include: "Invalid", - "Provisioning", "Creating", "Ready", "Unprovisioning", "Deleting", "Failed", "Upgrading". + :param status: The status of the compose deployment. Possible values + include: 'Invalid', 'Provisioning', 'Creating', 'Ready', 'Unprovisioning', + 'Deleting', 'Failed', 'Upgrading' :type status: str or ~azure.servicefabric.models.ComposeDeploymentStatus - :param status_details: The status details of compose deployment including failure message. + :param status_details: The status details of compose deployment including + failure message. :type status_details: str """ @@ -8074,15 +6625,7 @@ class ComposeDeploymentStatusInfo(msrest.serialization.Model): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - application_name: Optional[str] = None, - status: Optional[Union[str, "ComposeDeploymentStatus"]] = None, - status_details: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, application_name: str=None, status=None, status_details: str=None, **kwargs) -> None: super(ComposeDeploymentStatusInfo, self).__init__(**kwargs) self.name = name self.application_name = application_name @@ -8090,40 +6633,48 @@ def __init__( self.status_details = status_details -class ComposeDeploymentUpgradeDescription(msrest.serialization.Model): +class ComposeDeploymentUpgradeDescription(Model): """Describes the parameters for a compose deployment upgrade. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file that describes the - deployment to create. + :param compose_file_content: Required. 
The content of the compose file + that describes the deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container registry. + :param registry_credential: Credential information to connect to container + registry. :type registry_credential: ~azure.servicefabric.models.RegistryCredential - :param upgrade_kind: Required. The kind of upgrade out of the following possible values. - Possible values include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). 
+ :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. 
+ :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy """ _validation = { @@ -8144,20 +6695,7 @@ class ComposeDeploymentUpgradeDescription(msrest.serialization.Model): 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, } - def __init__( - self, - *, - deployment_name: str, - compose_file_content: str, - upgrade_kind: Union[str, "UpgradeKind"] = "Rolling", - registry_credential: Optional["RegistryCredential"] = None, - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, - force_restart: Optional[bool] = False, - monitoring_policy: Optional["MonitoringPolicyDescription"] = None, - application_health_policy: Optional["ApplicationHealthPolicy"] = None, - **kwargs - ): + def __init__(self, *, deployment_name: str, compose_file_content: str, registry_credential=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, monitoring_policy=None, application_health_policy=None, **kwargs) -> None: super(ComposeDeploymentUpgradeDescription, self).__init__(**kwargs) self.deployment_name = deployment_name self.compose_file_content = compose_file_content @@ -8170,78 +6708,92 @@ def __init__( self.application_health_policy = application_health_policy -class ComposeDeploymentUpgradeProgressInfo(msrest.serialization.Model): +class ComposeDeploymentUpgradeProgressInfo(Model): """Describes the parameters for a compose deployment upgrade. :param deployment_name: The name of the target deployment. :type deployment_name: str - :param application_name: The name of the target application, including the 'fabric:' URI - scheme. + :param application_name: The name of the target application, including the + 'fabric:' URI scheme. 
:type application_name: str - :param upgrade_state: The state of the compose deployment upgrade. Possible values include: - "Invalid", "ProvisioningTarget", "RollingForwardInProgress", "RollingForwardPending", - "UnprovisioningCurrent", "RollingForwardCompleted", "RollingBackInProgress", - "UnprovisioningTarget", "RollingBackCompleted", "Failed". - :type upgrade_state: str or ~azure.servicefabric.models.ComposeDeploymentUpgradeState - :param upgrade_status_details: Additional detailed information about the status of the pending - upgrade. + :param upgrade_state: The state of the compose deployment upgrade. + Possible values include: 'Invalid', 'ProvisioningTarget', + 'RollingForwardInProgress', 'RollingForwardPending', + 'UnprovisioningCurrent', 'RollingForwardCompleted', + 'RollingBackInProgress', 'UnprovisioningTarget', 'RollingBackCompleted', + 'Failed' + :type upgrade_state: str or + ~azure.servicefabric.models.ComposeDeploymentUpgradeState + :param upgrade_status_details: Additional detailed information about the + status of the pending upgrade. :type upgrade_status_details: str - :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. 
Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param application_health_policy: Defines a health policy used to evaluate the health of an - application or one of its children entities. 
- :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :param target_application_type_version: The target application type version (found in the - application manifest) for the application upgrade. + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param target_application_type_version: The target application type + version (found in the application manifest) for the application upgrade. :type target_application_type_version: str - :param upgrade_duration: The estimated amount of time that the overall upgrade elapsed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param upgrade_duration: The estimated amount of time that the overall + upgrade elapsed. It is first interpreted as a string representing an ISO + 8601 duration. If that fails, then it is interpreted as a number + representing the total number of milliseconds. :type upgrade_duration: str - :param current_upgrade_domain_duration: The estimated amount of time spent processing current - Upgrade Domain. It is first interpreted as a string representing an ISO 8601 duration. If that - fails, then it is interpreted as a number representing the total number of milliseconds. + :param current_upgrade_domain_duration: The estimated amount of time spent + processing current Upgrade Domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. 
:type current_upgrade_domain_duration: str - :param application_unhealthy_evaluations: List of health evaluations that resulted in the - current aggregated health state. + :param application_unhealthy_evaluations: List of health evaluations that + resulted in the current aggregated health state. :type application_unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param current_upgrade_domain_progress: Information about the current in-progress upgrade - domain. + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. :type current_upgrade_domain_progress: ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo - :param start_timestamp_utc: The estimated UTC datetime when the upgrade started. + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. :type start_timestamp_utc: str - :param failure_timestamp_utc: The estimated UTC datetime when the upgrade failed and - FailureAction was executed. + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. :type failure_timestamp_utc: str - :param failure_reason: The cause of an upgrade failure that resulted in FailureAction being - executed. Possible values include: "None", "Interrupted", "HealthCheck", - "UpgradeDomainTimeout", "OverallUpgradeTimeout". + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' :type failure_reason: str or ~azure.servicefabric.models.FailureReason - :param upgrade_domain_progress_at_failure: Information about the upgrade domain progress at the - time of upgrade failure. + :param upgrade_domain_progress_at_failure: Information about the upgrade + domain progress at the time of upgrade failure. 
:type upgrade_domain_progress_at_failure: ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo - :param application_upgrade_status_details: Additional details of application upgrade including - failure message. + :param application_upgrade_status_details: Additional details of + application upgrade including failure message. :type application_upgrade_status_details: str """ @@ -8268,31 +6820,7 @@ class ComposeDeploymentUpgradeProgressInfo(msrest.serialization.Model): 'application_upgrade_status_details': {'key': 'ApplicationUpgradeStatusDetails', 'type': 'str'}, } - def __init__( - self, - *, - deployment_name: Optional[str] = None, - application_name: Optional[str] = None, - upgrade_state: Optional[Union[str, "ComposeDeploymentUpgradeState"]] = None, - upgrade_status_details: Optional[str] = None, - upgrade_kind: Optional[Union[str, "UpgradeKind"]] = "Rolling", - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - force_restart: Optional[bool] = False, - upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, - monitoring_policy: Optional["MonitoringPolicyDescription"] = None, - application_health_policy: Optional["ApplicationHealthPolicy"] = None, - target_application_type_version: Optional[str] = None, - upgrade_duration: Optional[str] = "PT0H2M0S", - current_upgrade_domain_duration: Optional[str] = "PT0H2M0S", - application_unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - current_upgrade_domain_progress: Optional["CurrentUpgradeDomainProgressInfo"] = None, - start_timestamp_utc: Optional[str] = None, - failure_timestamp_utc: Optional[str] = None, - failure_reason: Optional[Union[str, "FailureReason"]] = None, - upgrade_domain_progress_at_failure: Optional["FailureUpgradeDomainProgressInfo"] = None, - application_upgrade_status_details: Optional[str] = None, - **kwargs - ): + def __init__(self, *, deployment_name: str=None, application_name: str=None, upgrade_state=None, 
upgrade_status_details: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", force_restart: bool=None, upgrade_replica_set_check_timeout_in_seconds: int=None, monitoring_policy=None, application_health_policy=None, target_application_type_version: str=None, upgrade_duration: str=None, current_upgrade_domain_duration: str=None, application_unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, application_upgrade_status_details: str=None, **kwargs) -> None: super(ComposeDeploymentUpgradeProgressInfo, self).__init__(**kwargs) self.deployment_name = deployment_name self.application_name = application_name @@ -8316,21 +6844,23 @@ def __init__( self.application_upgrade_status_details = application_upgrade_status_details -class ConfigParameterOverride(msrest.serialization.Model): +class ConfigParameterOverride(Model): """Information about a configuration parameter override. All required parameters must be populated in order to send to Azure. - :param section_name: Required. Name of the section for the parameter override. + :param section_name: Required. Name of the section for the parameter + override. :type section_name: str - :param parameter_name: Required. Name of the parameter that has been overridden. + :param parameter_name: Required. Name of the parameter that has been + overridden. :type parameter_name: str :param parameter_value: Required. Value of the overridden parameter. :type parameter_value: str :param timeout: The duration until config override is considered as valid. - :type timeout: ~datetime.timedelta - :param persist_across_upgrade: A value that indicates whether config override will be removed - on upgrade or will still be considered as valid. 
+ :type timeout: timedelta + :param persist_across_upgrade: A value that indicates whether config + override will be removed on upgrade or will still be considered as valid. :type persist_across_upgrade: bool """ @@ -8348,16 +6878,7 @@ class ConfigParameterOverride(msrest.serialization.Model): 'persist_across_upgrade': {'key': 'PersistAcrossUpgrade', 'type': 'bool'}, } - def __init__( - self, - *, - section_name: str, - parameter_name: str, - parameter_value: str, - timeout: Optional[datetime.timedelta] = None, - persist_across_upgrade: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, section_name: str, parameter_name: str, parameter_value: str, timeout=None, persist_across_upgrade: bool=None, **kwargs) -> None: super(ConfigParameterOverride, self).__init__(**kwargs) self.section_name = section_name self.parameter_name = parameter_name @@ -8366,19 +6887,19 @@ def __init__( self.persist_across_upgrade = persist_across_upgrade -class ContainerApiRequestBody(msrest.serialization.Model): +class ContainerApiRequestBody(Model): """parameters for making container API call. All required parameters must be populated in order to send to Azure. - :param http_verb: HTTP verb of container REST API, defaults to "GET". + :param http_verb: HTTP verb of container REST API, defaults to "GET" :type http_verb: str - :param uri_path: Required. URI path of container REST API. + :param uri_path: Required. URI path of container REST API :type uri_path: str - :param content_type: Content type of container REST API request, defaults to - "application/json". + :param content_type: Content type of container REST API request, defaults + to "application/json" :type content_type: str - :param body: HTTP request body of container REST API. 
+ :param body: HTTP request body of container REST API :type body: str """ @@ -8393,15 +6914,7 @@ class ContainerApiRequestBody(msrest.serialization.Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__( - self, - *, - uri_path: str, - http_verb: Optional[str] = None, - content_type: Optional[str] = None, - body: Optional[str] = None, - **kwargs - ): + def __init__(self, *, uri_path: str, http_verb: str=None, content_type: str=None, body: str=None, **kwargs) -> None: super(ContainerApiRequestBody, self).__init__(**kwargs) self.http_verb = http_verb self.uri_path = uri_path @@ -8409,7 +6922,7 @@ def __init__( self.body = body -class ContainerApiResponse(msrest.serialization.Model): +class ContainerApiResponse(Model): """Response body that wraps container API result. All required parameters must be populated in order to send to Azure. @@ -8426,28 +6939,24 @@ class ContainerApiResponse(msrest.serialization.Model): 'container_api_result': {'key': 'ContainerApiResult', 'type': 'ContainerApiResult'}, } - def __init__( - self, - *, - container_api_result: "ContainerApiResult", - **kwargs - ): + def __init__(self, *, container_api_result, **kwargs) -> None: super(ContainerApiResponse, self).__init__(**kwargs) self.container_api_result = container_api_result -class ContainerApiResult(msrest.serialization.Model): +class ContainerApiResult(Model): """Container API result. All required parameters must be populated in order to send to Azure. - :param status: Required. HTTP status code returned by the target container API. + :param status: Required. HTTP status code returned by the target container + API :type status: int - :param content_type: HTTP content type. + :param content_type: HTTP content type :type content_type: str - :param content_encoding: HTTP content encoding. + :param content_encoding: HTTP content encoding :type content_encoding: str - :param body: container API result body. 
+ :param body: container API result body :type body: str """ @@ -8462,15 +6971,7 @@ class ContainerApiResult(msrest.serialization.Model): 'body': {'key': 'Body', 'type': 'str'}, } - def __init__( - self, - *, - status: int, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - body: Optional[str] = None, - **kwargs - ): + def __init__(self, *, status: int, content_type: str=None, content_encoding: str=None, body: str=None, **kwargs) -> None: super(ContainerApiResult, self).__init__(**kwargs) self.status = status self.content_type = content_type @@ -8478,10 +6979,11 @@ def __init__( self.body = body -class ContainerCodePackageProperties(msrest.serialization.Model): +class ContainerCodePackageProperties(Model): """Describes a container and its runtime properties. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. @@ -8490,16 +6992,21 @@ class ContainerCodePackageProperties(msrest.serialization.Model): :param image: Required. The Container image to use. :type image: str :param image_registry_credential: Image registry credential. - :type image_registry_credential: ~azure.servicefabric.models.ImageRegistryCredential + :type image_registry_credential: + ~azure.servicefabric.models.ImageRegistryCredential :param entry_point: Override for the default entry point in the container. :type entry_point: str - :param commands: Command array to execute within the container in exec form. + :param commands: Command array to execute within the container in exec + form. :type commands: list[str] - :param environment_variables: The environment variables to set in this container. - :type environment_variables: list[~azure.servicefabric.models.EnvironmentVariable] - :param settings: The settings to set in this container. 
The setting file path can be fetched - from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". - The path for Linux container is "/var/secrets". + :param environment_variables: The environment variables to set in this + container + :type environment_variables: + list[~azure.servicefabric.models.EnvironmentVariable] + :param settings: The settings to set in this container. The setting file + path can be fetched from environment variable "Fabric_SettingPath". The + path for Windows container is "C:\\\\secrets". The path for Linux + container is "/var/secrets". :type settings: list[~azure.servicefabric.models.Setting] :param labels: The labels to set in this container. :type labels: list[~azure.servicefabric.models.ContainerLabel] @@ -8507,24 +7014,26 @@ class ContainerCodePackageProperties(msrest.serialization.Model): :type endpoints: list[~azure.servicefabric.models.EndpointProperties] :param resources: Required. The resources required by this container. :type resources: ~azure.servicefabric.models.ResourceRequirements - :param volume_refs: Volumes to be attached to the container. The lifetime of these volumes is - independent of the application's lifetime. + :param volume_refs: Volumes to be attached to the container. The lifetime + of these volumes is independent of the application's lifetime. :type volume_refs: list[~azure.servicefabric.models.VolumeReference] - :param volumes: Volumes to be attached to the container. The lifetime of these volumes is - scoped to the application's lifetime. + :param volumes: Volumes to be attached to the container. The lifetime of + these volumes is scoped to the application's lifetime. :type volumes: list[~azure.servicefabric.models.ApplicationScopedVolume] :param diagnostics: Reference to sinks in DiagnosticsDescription. 
:type diagnostics: ~azure.servicefabric.models.DiagnosticsRef - :param reliable_collections_refs: A list of ReliableCollection resources used by this - particular code package. Please refer to ReliableCollectionsRef for more details. - :type reliable_collections_refs: list[~azure.servicefabric.models.ReliableCollectionsRef] + :param reliable_collections_refs: A list of ReliableCollection resources + used by this particular code package. Please refer to + ReliableCollectionsRef for more details. + :type reliable_collections_refs: + list[~azure.servicefabric.models.ReliableCollectionsRef] :ivar instance_view: Runtime information of a container instance. :vartype instance_view: ~azure.servicefabric.models.ContainerInstanceView - :param liveness_probe: An array of liveness probes for a code package. It determines when to - restart a code package. + :param liveness_probe: An array of liveness probes for a code package. It + determines when to restart a code package. :type liveness_probe: list[~azure.servicefabric.models.Probe] - :param readiness_probe: An array of readiness probes for a code package. It determines when to - unpublish an endpoint. + :param readiness_probe: An array of readiness probes for a code package. + It determines when to unpublish an endpoint. 
:type readiness_probe: list[~azure.servicefabric.models.Probe] """ @@ -8555,27 +7064,7 @@ class ContainerCodePackageProperties(msrest.serialization.Model): 'readiness_probe': {'key': 'readinessProbe', 'type': '[Probe]'}, } - def __init__( - self, - *, - name: str, - image: str, - resources: "ResourceRequirements", - image_registry_credential: Optional["ImageRegistryCredential"] = None, - entry_point: Optional[str] = None, - commands: Optional[List[str]] = None, - environment_variables: Optional[List["EnvironmentVariable"]] = None, - settings: Optional[List["Setting"]] = None, - labels: Optional[List["ContainerLabel"]] = None, - endpoints: Optional[List["EndpointProperties"]] = None, - volume_refs: Optional[List["VolumeReference"]] = None, - volumes: Optional[List["ApplicationScopedVolume"]] = None, - diagnostics: Optional["DiagnosticsRef"] = None, - reliable_collections_refs: Optional[List["ReliableCollectionsRef"]] = None, - liveness_probe: Optional[List["Probe"]] = None, - readiness_probe: Optional[List["Probe"]] = None, - **kwargs - ): + def __init__(self, *, name: str, image: str, resources, image_registry_credential=None, entry_point: str=None, commands=None, environment_variables=None, settings=None, labels=None, endpoints=None, volume_refs=None, volumes=None, diagnostics=None, reliable_collections_refs=None, liveness_probe=None, readiness_probe=None, **kwargs) -> None: super(ContainerCodePackageProperties, self).__init__(**kwargs) self.name = name self.image = image @@ -8596,7 +7085,7 @@ def __init__( self.readiness_probe = readiness_probe -class ContainerEvent(msrest.serialization.Model): +class ContainerEvent(Model): """A container event. :param name: The name of the container event. @@ -8607,7 +7096,7 @@ class ContainerEvent(msrest.serialization.Model): :type first_timestamp: str :param last_timestamp: Date/time of the last event. :type last_timestamp: str - :param message: The event message. 
+ :param message: The event message :type message: str :param type: The event type. :type type: str @@ -8622,17 +7111,7 @@ class ContainerEvent(msrest.serialization.Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - count: Optional[int] = None, - first_timestamp: Optional[str] = None, - last_timestamp: Optional[str] = None, - message: Optional[str] = None, - type: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, count: int=None, first_timestamp: str=None, last_timestamp: str=None, message: str=None, type: str=None, **kwargs) -> None: super(ContainerEvent, self).__init__(**kwargs) self.name = name self.count = count @@ -8647,71 +7126,44 @@ class ContainerInstanceEvent(FabricEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ContainerInstanceEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ContainerInstanceEvent' # type: str + self.kind = 'ContainerInstanceEvent' -class ContainerInstanceView(msrest.serialization.Model): +class ContainerInstanceView(Model): """Runtime information of a container instance. - :param restart_count: The number of times the container has been restarted. + :param restart_count: The number of times the container has been + restarted. :type restart_count: int :param current_state: Current container instance state. 
:type current_state: ~azure.servicefabric.models.ContainerState @@ -8728,15 +7180,7 @@ class ContainerInstanceView(msrest.serialization.Model): 'events': {'key': 'events', 'type': '[ContainerEvent]'}, } - def __init__( - self, - *, - restart_count: Optional[int] = None, - current_state: Optional["ContainerState"] = None, - previous_state: Optional["ContainerState"] = None, - events: Optional[List["ContainerEvent"]] = None, - **kwargs - ): + def __init__(self, *, restart_count: int=None, current_state=None, previous_state=None, events=None, **kwargs) -> None: super(ContainerInstanceView, self).__init__(**kwargs) self.restart_count = restart_count self.current_state = current_state @@ -8744,7 +7188,7 @@ def __init__( self.events = events -class ContainerLabel(msrest.serialization.Model): +class ContainerLabel(Model): """Describes a container label. All required parameters must be populated in order to send to Azure. @@ -8765,19 +7209,13 @@ class ContainerLabel(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - value: str, - **kwargs - ): + def __init__(self, *, name: str, value: str, **kwargs) -> None: super(ContainerLabel, self).__init__(**kwargs) self.name = name self.value = value -class ContainerLogs(msrest.serialization.Model): +class ContainerLogs(Model): """Container logs. :param content: Container logs. @@ -8788,27 +7226,22 @@ class ContainerLogs(msrest.serialization.Model): 'content': {'key': 'Content', 'type': 'str'}, } - def __init__( - self, - *, - content: Optional[str] = None, - **kwargs - ): + def __init__(self, *, content: str=None, **kwargs) -> None: super(ContainerLogs, self).__init__(**kwargs) self.content = content -class ContainerState(msrest.serialization.Model): +class ContainerState(Model): """The container state. - :param state: The state of this container. 
+ :param state: The state of this container :type state: str :param start_time: Date/time when the container state started. - :type start_time: ~datetime.datetime + :type start_time: datetime :param exit_code: The container exit code. :type exit_code: str :param finish_time: Date/time when the container state finished. - :type finish_time: ~datetime.datetime + :type finish_time: datetime :param detail_status: Human-readable status of this state. :type detail_status: str """ @@ -8821,16 +7254,7 @@ class ContainerState(msrest.serialization.Model): 'detail_status': {'key': 'detailStatus', 'type': 'str'}, } - def __init__( - self, - *, - state: Optional[str] = None, - start_time: Optional[datetime.datetime] = None, - exit_code: Optional[str] = None, - finish_time: Optional[datetime.datetime] = None, - detail_status: Optional[str] = None, - **kwargs - ): + def __init__(self, *, state: str=None, start_time=None, exit_code: str=None, finish_time=None, detail_status: str=None, **kwargs) -> None: super(ContainerState, self).__init__(**kwargs) self.state = state self.start_time = start_time @@ -8839,17 +7263,18 @@ def __init__( self.detail_status = detail_status -class CreateComposeDeploymentDescription(msrest.serialization.Model): +class CreateComposeDeploymentDescription(Model): """Defines description for creating a Service Fabric compose deployment. All required parameters must be populated in order to send to Azure. :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: Required. The content of the compose file that describes the - deployment to create. + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. :type compose_file_content: str - :param registry_credential: Credential information to connect to container registry. + :param registry_credential: Credential information to connect to container + registry. 
:type registry_credential: ~azure.servicefabric.models.RegistryCredential """ @@ -8864,27 +7289,22 @@ class CreateComposeDeploymentDescription(msrest.serialization.Model): 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, } - def __init__( - self, - *, - deployment_name: str, - compose_file_content: str, - registry_credential: Optional["RegistryCredential"] = None, - **kwargs - ): + def __init__(self, *, deployment_name: str, compose_file_content: str, registry_credential=None, **kwargs) -> None: super(CreateComposeDeploymentDescription, self).__init__(**kwargs) self.deployment_name = deployment_name self.compose_file_content = compose_file_content self.registry_credential = registry_credential -class CurrentUpgradeDomainProgressInfo(msrest.serialization.Model): +class CurrentUpgradeDomainProgressInfo(Model): """Information about the current in-progress upgrade domain. - :param domain_name: The name of the upgrade domain. + :param domain_name: The name of the upgrade domain :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
- :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -8892,51 +7312,41 @@ class CurrentUpgradeDomainProgressInfo(msrest.serialization.Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - node_upgrade_progress_list: Optional[List["NodeUpgradeProgressInfo"]] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: super(CurrentUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = domain_name self.node_upgrade_progress_list = node_upgrade_progress_list -class DeactivationIntentDescription(msrest.serialization.Model): +class DeactivationIntentDescription(Model): """Describes the intent or reason for deactivating the node. - :param deactivation_intent: Describes the intent or reason for deactivating the node. The - possible values are following. Possible values include: "Pause", "Restart", "RemoveData". - :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent + :param deactivation_intent: Describes the intent or reason for + deactivating the node. The possible values are following. 
Possible values + include: 'Pause', 'Restart', 'RemoveData' + :type deactivation_intent: str or + ~azure.servicefabric.models.DeactivationIntent """ _attribute_map = { 'deactivation_intent': {'key': 'DeactivationIntent', 'type': 'str'}, } - def __init__( - self, - *, - deactivation_intent: Optional[Union[str, "DeactivationIntent"]] = None, - **kwargs - ): + def __init__(self, *, deactivation_intent=None, **kwargs) -> None: super(DeactivationIntentDescription, self).__init__(**kwargs) self.deactivation_intent = deactivation_intent -class ExecutionPolicy(msrest.serialization.Model): +class ExecutionPolicy(Model): """The execution policy of the service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy. + sub-classes are: DefaultExecutionPolicy, RunToCompletionExecutionPolicy All required parameters must be populated in order to send to Azure. - :param type: Required. Enumerates the execution policy types for services.Constant filled by - server. Possible values include: "Default", "RunToCompletion". - :type type: str or ~azure.servicefabric.models.ExecutionPolicyType + :param type: Required. Constant filled by server. + :type type: str """ _validation = { @@ -8951,12 +7361,9 @@ class ExecutionPolicy(msrest.serialization.Model): 'type': {'Default': 'DefaultExecutionPolicy', 'RunToCompletion': 'RunToCompletionExecutionPolicy'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ExecutionPolicy, self).__init__(**kwargs) - self.type = None # type: Optional[str] + self.type = None class DefaultExecutionPolicy(ExecutionPolicy): @@ -8964,9 +7371,8 @@ class DefaultExecutionPolicy(ExecutionPolicy): All required parameters must be populated in order to send to Azure. - :param type: Required. Enumerates the execution policy types for services.Constant filled by - server. Possible values include: "Default", "RunToCompletion". 
- :type type: str or ~azure.servicefabric.models.ExecutionPolicyType + :param type: Required. Constant filled by server. + :type type: str """ _validation = { @@ -8977,87 +7383,76 @@ class DefaultExecutionPolicy(ExecutionPolicy): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(DefaultExecutionPolicy, self).__init__(**kwargs) - self.type = 'Default' # type: str + self.type = 'Default' class DeletePropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that deletes a specified property if it exists. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that deletes a specified property if it + exists. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__( - self, - *, - property_name: str, - **kwargs - ): + def __init__(self, *, property_name: str, **kwargs) -> None: super(DeletePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'Delete' # type: str + self.kind = 'Delete' class DeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. -Can be returned during cluster upgrade when the aggregated health state of the cluster is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. 
Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for delta nodes, containing health evaluations + for each unhealthy node that impacted current aggregated health state. + Can be returned during cluster upgrade when the aggregated health state of + the cluster is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param baseline_error_count: Number of nodes with aggregated heath state Error in the health - store at the beginning of the cluster upgrade. + :param kind: Required. Constant filled by server. + :type kind: str + :param baseline_error_count: Number of nodes with aggregated heath state + Error in the health store at the beginning of the cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of nodes in the health store at the beginning of the - cluster upgrade. + :param baseline_total_count: Total number of nodes in the health store at + the beginning of the cluster upgrade. :type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of delta unhealthy nodes - from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of + delta unhealthy nodes from the ClusterUpgradeHealthPolicy. 
:type max_percent_delta_unhealthy_nodes: int :param total_count: Total number of nodes in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. - Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. + Includes all the unhealthy NodeHealthEvaluation that impacted the + aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -9065,9 +7460,9 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, 'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'}, @@ -9075,52 +7470,45 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - baseline_error_count: Optional[int] = None, - baseline_total_count: Optional[int] = None, - max_percent_delta_unhealthy_nodes: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, baseline_error_count: int=None, baseline_total_count: 
int=None, max_percent_delta_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(DeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'DeltaNodesCheck' # type: str self.baseline_error_count = baseline_error_count self.baseline_total_count = baseline_total_count self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeltaNodesCheck' class DeployedApplicationHealth(EntityHealth): - """Information about the health of an application deployed on a Service Fabric node. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Information about the health of an application deployed on a Service Fabric + node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
:type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the application deployed on the node whose health information is described - by this object. + :param name: Name of the application deployed on the node whose health + information is described by this object. :type name: str :param node_name: Name of the node where this application is deployed. :type node_name: str - :param deployed_service_package_health_states: Deployed service package health states for the - current deployed application as found in the health store. + :param deployed_service_package_health_states: Deployed service package + health states for the current deployed application as found in the health + store. 
:type deployed_service_package_health_states: list[~azure.servicefabric.models.DeployedServicePackageHealthState] """ @@ -9135,18 +7523,7 @@ class DeployedApplicationHealth(EntityHealth): 'deployed_service_package_health_states': {'key': 'DeployedServicePackageHealthStates', 'type': '[DeployedServicePackageHealthState]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - name: Optional[str] = None, - node_name: Optional[str] = None, - deployed_service_package_health_states: Optional[List["DeployedServicePackageHealthState"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, node_name: str=None, deployed_service_package_health_states=None, **kwargs) -> None: super(DeployedApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name self.node_name = node_name @@ -9154,36 +7531,34 @@ def __init__( class DeployedApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed application, containing information about the data and the algorithm used by the health store to evaluate health. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a deployed application, containing + information about the data and the algorithm used by the health store to + evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Name of the node where the application is deployed to. :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
:type application_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the deployed application. - The types of the unhealthy evaluations can be DeployedServicePackagesHealthEvaluation or - EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the deployed application. + The types of the unhealthy evaluations can be + DeployedServicePackagesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -9191,29 +7566,20 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - node_name: Optional[str] = None, - application_name: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, application_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(DeployedApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'DeployedApplication' # type: str self.node_name = node_name self.application_name = 
application_name self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedApplication' class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): @@ -9221,44 +7587,25 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - 
"ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. 
:type application_instance_id: long @@ -9276,16 +7623,17 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -9300,11 +7648,11 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -9318,28 +7666,8 @@ class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_instance_id: int, - node_name: str, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: 
Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(DeployedApplicationHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'DeployedApplicationHealthReportExpired' # type: str self.application_instance_id = application_instance_id self.node_name = node_name self.source_id = source_id @@ -9350,18 +7678,24 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedApplicationHealthReportExpired' class DeployedApplicationHealthState(EntityHealthState): - """Represents the health state of a deployed application, which contains the entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is deployed. + """Represents the health state of a deployed application, which contains the + entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is + deployed. :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str """ @@ -9371,31 +7705,27 @@ class DeployedApplicationHealthState(EntityHealthState): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - application_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, **kwargs) -> None: super(DeployedApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.node_name = node_name self.application_name = application_name class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed application, which contains the node where the application is deployed, the aggregated health state and any deployed service packages that respect the chunk query description filters. - - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + """Represents the health state chunk of a deployed application, which contains + the node where the application is deployed, the aggregated health state and + any deployed service packages that respect the chunk query description + filters. 
+ + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of node where the application is deployed. :type node_name: str - :param deployed_service_package_health_state_chunks: The list of deployed service package - health state chunks belonging to the deployed application that respect the filters in the - cluster health chunk query description. + :param deployed_service_package_health_state_chunks: The list of deployed + service package health state chunks belonging to the deployed application + that respect the filters in the cluster health chunk query description. :type deployed_service_package_health_state_chunks: ~azure.servicefabric.models.DeployedServicePackageHealthStateChunkList """ @@ -9406,87 +7736,93 @@ class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_service_package_health_state_chunks': {'key': 'DeployedServicePackageHealthStateChunks', 'type': 'DeployedServicePackageHealthStateChunkList'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - deployed_service_package_health_state_chunks: Optional["DeployedServicePackageHealthStateChunkList"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, node_name: str=None, deployed_service_package_health_state_chunks=None, **kwargs) -> None: super(DeployedApplicationHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.node_name = node_name self.deployed_service_package_health_state_chunks = deployed_service_package_health_state_chunks -class DeployedApplicationHealthStateChunkList(msrest.serialization.Model): - """The list of deployed application health state chunks that respect the input filters in the chunk query. 
Returned by get cluster health state chunks query. +class DeployedApplicationHealthStateChunkList(Model): + """The list of deployed application health state chunks that respect the input + filters in the chunk query. Returned by get cluster health state chunks + query. - :param items: The list of deployed application health state chunks that respect the input - filters in the chunk query. - :type items: list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] + :param items: The list of deployed application health state chunks that + respect the input filters in the chunk query. + :type items: + list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedApplicationHealthStateChunk]'}, } - def __init__( - self, - *, - items: Optional[List["DeployedApplicationHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, items=None, **kwargs) -> None: super(DeployedApplicationHealthStateChunkList, self).__init__(**kwargs) self.items = items -class DeployedApplicationHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a deployed application should be included as a child of an application in the cluster health chunk. -The deployed applications are only returned if the parent application matches a filter specified in the cluster health chunk query description. -One filter can match zero, one or multiple deployed applications, depending on its properties. - - :param node_name_filter: The name of the node where the application is deployed in order to - match the filter. - If specified, the filter is applied only to the application deployed on the specified node. - If the application is not deployed on the node with the specified name, no deployed - application is returned in the cluster health chunk based on this filter. 
- Otherwise, the deployed application is included in the cluster health chunk if it respects the - other filter properties. - If not specified, all deployed applications that match the parent filters (if any) are taken - into consideration and matched against the other filter members, like health state filter. +class DeployedApplicationHealthStateFilter(Model): + """Defines matching criteria to determine whether a deployed application + should be included as a child of an application in the cluster health + chunk. + The deployed applications are only returned if the parent application + matches a filter specified in the cluster health chunk query description. + One filter can match zero, one or multiple deployed applications, depending + on its properties. + + :param node_name_filter: The name of the node where the application is + deployed in order to match the filter. + If specified, the filter is applied only to the application deployed on + the specified node. + If the application is not deployed on the node with the specified name, no + deployed application is returned in the cluster health chunk based on this + filter. + Otherwise, the deployed application is included in the cluster health + chunk if it respects the other filter properties. + If not specified, all deployed applications that match the parent filters + (if any) are taken into consideration and matched against the other filter + members, like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the deployed applications. It - allows selecting deployed applications if they match the desired health states. - The possible values are integer value of one of the following health states. Only deployed - applications that match the filter are returned. All deployed applications are used to evaluate - the cluster aggregated health state. - If not specified, default value is None, unless the node name is specified. 
If the filter has - default value and node name is specified, the matching deployed application is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed applications with HealthState - value of OK (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + deployed applications. It allows selecting deployed applications if they + match the desired health states. + The possible values are integer value of one of the following health + states. Only deployed applications that match the filter are returned. All + deployed applications are used to evaluate the cluster aggregated health + state. + If not specified, default value is None, unless the node name is + specified. If the filter has default value and node name is specified, the + matching deployed application is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed applications + with HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. 
Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param deployed_service_package_filters: Defines a list of filters that specify which deployed - service packages to be included in the returned cluster health chunk as children of the parent - deployed application. The deployed service packages are returned only if the parent deployed + :param deployed_service_package_filters: Defines a list of filters that + specify which deployed service packages to be included in the returned + cluster health chunk as children of the parent deployed application. The + deployed service packages are returned only if the parent deployed application matches a filter. - If the list is empty, no deployed service packages are returned. All the deployed service - packages are used to evaluate the parent deployed application aggregated health state, - regardless of the input filters. - The deployed application filter may specify multiple deployed service package filters. - For example, it can specify a filter to return all deployed service packages with health state - Error and another filter to always include a deployed service package on a node. + If the list is empty, no deployed service packages are returned. All the + deployed service packages are used to evaluate the parent deployed + application aggregated health state, regardless of the input filters. + The deployed application filter may specify multiple deployed service + package filters. 
+ For example, it can specify a filter to return all deployed service + packages with health state Error and another filter to always include a + deployed service package on a node. :type deployed_service_package_filters: list[~azure.servicefabric.models.DeployedServicePackageHealthStateFilter] """ @@ -9497,49 +7833,47 @@ class DeployedApplicationHealthStateFilter(msrest.serialization.Model): 'deployed_service_package_filters': {'key': 'DeployedServicePackageFilters', 'type': '[DeployedServicePackageHealthStateFilter]'}, } - def __init__( - self, - *, - node_name_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - deployed_service_package_filters: Optional[List["DeployedServicePackageHealthStateFilter"]] = None, - **kwargs - ): + def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, deployed_service_package_filters=None, **kwargs) -> None: super(DeployedApplicationHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = node_name_filter self.health_state_filter = health_state_filter self.deployed_service_package_filters = deployed_service_package_filters -class DeployedApplicationInfo(msrest.serialization.Model): +class DeployedApplicationInfo(Model): """Information about application deployed on the node. - :param id: The identity of the application. This is an encoded representation of the - application name. This is used in the REST APIs to identify the application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type id: str - :param name: The name of the application, including the 'fabric:' URI scheme. + :param name: The name of the application, including the 'fabric:' URI + scheme. :type name: str - :param type_name: The application type name as defined in the application manifest. + :param type_name: The application type name as defined in the application + manifest. :type type_name: str - :param status: The status of the application deployed on the node. Following are the possible - values. Possible values include: "Invalid", "Downloading", "Activating", "Active", "Upgrading", - "Deactivating". + :param status: The status of the application deployed on the node. + Following are the possible values. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' :type status: str or ~azure.servicefabric.models.DeployedApplicationStatus - :param work_directory: The work directory of the application on the node. The work directory - can be used to store application data. + :param work_directory: The work directory of the application on the node. + The work directory can be used to store application data. :type work_directory: str - :param log_directory: The log directory of the application on the node. The log directory can - be used to store application logs. + :param log_directory: The log directory of the application on the node. + The log directory can be used to store application logs. :type log_directory: str - :param temp_directory: The temp directory of the application on the node. The code packages - belonging to the application are forked with this directory set as their temporary directory. + :param temp_directory: The temp directory of the application on the node. 
+ The code packages belonging to the application are forked with this + directory set as their temporary directory. :type temp_directory: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState """ @@ -9554,19 +7888,7 @@ class DeployedApplicationInfo(msrest.serialization.Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - type_name: Optional[str] = None, - status: Optional[Union[str, "DeployedApplicationStatus"]] = None, - work_directory: Optional[str] = None, - log_directory: Optional[str] = None, - temp_directory: Optional[str] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, status=None, work_directory: str=None, log_directory: str=None, temp_directory: str=None, health_state=None, **kwargs) -> None: super(DeployedApplicationInfo, self).__init__(**kwargs) self.id = id self.name = name @@ -9583,44 +7905,25 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param application_instance_id: Required. Id of Application instance. :type application_instance_id: long @@ -9638,16 +7941,17 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'application_instance_id': {'required': True}, 'node_name': {'required': True}, @@ -9662,11 +7966,11 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, @@ -9680,28 +7984,8 @@ class DeployedApplicationNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - application_instance_id: int, - node_name: str, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: 
super(DeployedApplicationNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'DeployedApplicationNewHealthReport' # type: str self.application_instance_id = application_instance_id self.node_name = node_name self.source_id = source_id @@ -9712,41 +7996,41 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedApplicationNewHealthReport' class DeployedApplicationsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed applications, containing health evaluations for each unhealthy deployed application that impacted current aggregated health state. -Can be returned when evaluating application health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for deployed applications, containing health + evaluations for each unhealthy deployed application that impacted current + aggregated health state. + Can be returned when evaluating application health and the aggregated + health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_deployed_applications: Maximum allowed percentage of unhealthy - deployed applications from the ApplicationHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_deployed_applications: Maximum allowed + percentage of unhealthy deployed applications from the + ApplicationHealthPolicy. :type max_percent_unhealthy_deployed_applications: int - :param total_count: Total number of deployed applications of the application in the health - store. + :param total_count: Total number of deployed applications of the + application in the health store. 
:type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy DeployedApplicationHealthEvaluation that impacted the - aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + DeployedApplicationHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -9754,66 +8038,64 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_deployed_applications': {'key': 'MaxPercentUnhealthyDeployedApplications', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - max_percent_unhealthy_deployed_applications: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_deployed_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(DeployedApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'DeployedApplications' # type: str 
self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedApplications' -class DeployedCodePackageInfo(msrest.serialization.Model): +class DeployedCodePackageInfo(Model): """Information about code package deployed on a Service Fabric node. :param name: The name of the code package. :type name: str - :param version: The version of the code package specified in service manifest. + :param version: The version of the code package specified in service + manifest. :type version: str - :param service_manifest_name: The name of service manifest that specified this code package. + :param service_manifest_name: The name of service manifest that specified + this code package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_type: Specifies the type of host for main entry point of a code package as - specified in service manifest. Possible values include: "Invalid", "ExeHost", "ContainerHost". + :param host_type: Specifies the type of host for main entry point of a + code package as specified in service manifest. 
Possible values include: + 'Invalid', 'ExeHost', 'ContainerHost' :type host_type: str or ~azure.servicefabric.models.HostType - :param host_isolation_mode: Specifies the isolation mode of main entry point of a code package - when it's host type is ContainerHost. This is specified as part of container host policies in - application manifest while importing service manifest. Possible values include: "None", - "Process", "HyperV". - :type host_isolation_mode: str or ~azure.servicefabric.models.HostIsolationMode - :param status: Specifies the status of a deployed application or service package on a Service - Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", - "Upgrading", "Deactivating", "RanToCompletion", "Failed". + :param host_isolation_mode: Specifies the isolation mode of main entry + point of a code package when it's host type is ContainerHost. This is + specified as part of container host policies in application manifest while + importing service manifest. Possible values include: 'None', 'Process', + 'HyperV' + :type host_isolation_mode: str or + ~azure.servicefabric.models.HostIsolationMode + :param status: Specifies the status of a deployed application or service + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', + 'RanToCompletion', 'Failed' :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param run_frequency_interval: The interval at which code package is run. This is used for - periodic code package. + :param run_frequency_interval: The interval at which code package is run. + This is used for periodic code package. :type run_frequency_interval: str - :param setup_entry_point: Information about setup or main entry point of a code package - deployed on a Service Fabric node. + :param setup_entry_point: Information about setup or main entry point of a + code package deployed on a Service Fabric node. 
:type setup_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint - :param main_entry_point: Information about setup or main entry point of a code package deployed - on a Service Fabric node. + :param main_entry_point: Information about setup or main entry point of a + code package deployed on a Service Fabric node. :type main_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint """ @@ -9830,21 +8112,7 @@ class DeployedCodePackageInfo(msrest.serialization.Model): 'main_entry_point': {'key': 'MainEntryPoint', 'type': 'CodePackageEntryPoint'}, } - def __init__( - self, - *, - name: Optional[str] = None, - version: Optional[str] = None, - service_manifest_name: Optional[str] = None, - service_package_activation_id: Optional[str] = None, - host_type: Optional[Union[str, "HostType"]] = None, - host_isolation_mode: Optional[Union[str, "HostIsolationMode"]] = None, - status: Optional[Union[str, "DeploymentStatus"]] = None, - run_frequency_interval: Optional[str] = None, - setup_entry_point: Optional["CodePackageEntryPoint"] = None, - main_entry_point: Optional["CodePackageEntryPoint"] = None, - **kwargs - ): + def __init__(self, *, name: str=None, version: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, host_type=None, host_isolation_mode=None, status=None, run_frequency_interval: str=None, setup_entry_point=None, main_entry_point=None, **kwargs) -> None: super(DeployedCodePackageInfo, self).__init__(**kwargs) self.name = name self.version = version @@ -9859,24 +8127,28 @@ def __init__( class DeployedServicePackageHealth(EntityHealth): - """Information about the health of a service package for a specific application deployed on a Service Fabric node. - - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). 
- The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Information about the health of a service package for a specific + application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: Name of the service manifest. 
:type service_manifest_name: str @@ -9894,18 +8166,7 @@ class DeployedServicePackageHealth(EntityHealth): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - application_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - node_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, application_name: str=None, service_manifest_name: str=None, node_name: str=None, **kwargs) -> None: super(DeployedServicePackageHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.application_name = application_name self.service_manifest_name = service_manifest_name @@ -9913,36 +8174,36 @@ def __init__( class DeployedServicePackageHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a deployed service package, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a deployed service package, containing + information about the data and the algorithm used by health store to + evaluate health. The evaluation is returned only when the aggregated health + state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. 
+ :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: The name of the service manifest. :type service_manifest_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state. The type of the unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state. The type of the unhealthy evaluations + can be EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -9950,32 +8211,22 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - node_name: Optional[str] = None, - application_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(DeployedServicePackageHealthEvaluation, 
self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'DeployedServicePackage' # type: str self.node_name = node_name self.application_name = application_name self.service_manifest_name = service_manifest_name self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedServicePackage' class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): @@ -9983,50 +8234,33 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - 
"ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_manifest: Required. Service manifest name. :type service_manifest: str - :param service_package_instance_id: Required. Id of Service package instance. + :param service_package_instance_id: Required. Id of Service package + instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package activation. + :param service_package_activation_id: Required. Id of Service package + activation. :type service_package_activation_id: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str @@ -10042,16 +8276,17 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -10068,11 +8303,11 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest': {'key': 'ServiceManifest', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -10088,30 +8323,8 @@ class DeployedServicePackageHealthReportExpiredEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - service_manifest: str, - service_package_instance_id: int, - service_package_activation_id: str, - node_name: str, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_manifest: str, service_package_instance_id: int, service_package_activation_id: str, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, 
description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(DeployedServicePackageHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'DeployedServicePackageHealthReportExpired' # type: str self.service_manifest = service_manifest self.service_package_instance_id = service_package_instance_id self.service_package_activation_id = service_package_activation_id @@ -10124,25 +8337,33 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedServicePackageHealthReportExpired' class DeployedServicePackageHealthState(EntityHealthState): - """Represents the health state of a deployed service package, containing the entity identifier and the aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param node_name: Name of the node on which the service package is deployed. + """Represents the health state of a deployed service package, containing the + entity identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is + deployed. 
:type node_name: str - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_manifest_name: Name of the manifest describing the service package. + :param service_manifest_name: Name of the manifest describing the service + package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. 
:type service_package_activation_id: str """ @@ -10155,16 +8376,7 @@ class DeployedServicePackageHealthState(EntityHealthState): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - application_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - service_package_activation_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: super(DeployedServicePackageHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.node_name = node_name self.application_name = application_name @@ -10173,18 +8385,21 @@ def __init__( class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a deployed service package, which contains the service manifest name and the service package aggregated health state. + """Represents the health state chunk of a deployed service package, which + contains the service manifest name and the service package aggregated + health state. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param service_manifest_name: The name of the service manifest. 
:type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -10195,85 +8410,88 @@ class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - service_manifest_name: Optional[str] = None, - service_package_activation_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, health_state=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: super(DeployedServicePackageHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.service_manifest_name = service_manifest_name self.service_package_activation_id = service_package_activation_id -class DeployedServicePackageHealthStateChunkList(msrest.serialization.Model): - """The list of deployed service package health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class DeployedServicePackageHealthStateChunkList(Model): + """The list of deployed service package health state chunks that respect the + input filters in the chunk query. Returned by get cluster health state + chunks query. 
- :param items: The list of deployed service package health state chunks that respect the input - filters in the chunk query. - :type items: list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] + :param items: The list of deployed service package health state chunks + that respect the input filters in the chunk query. + :type items: + list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] """ _attribute_map = { 'items': {'key': 'Items', 'type': '[DeployedServicePackageHealthStateChunk]'}, } - def __init__( - self, - *, - items: Optional[List["DeployedServicePackageHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, items=None, **kwargs) -> None: super(DeployedServicePackageHealthStateChunkList, self).__init__(**kwargs) self.items = items -class DeployedServicePackageHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a deployed service package should be included as a child of a deployed application in the cluster health chunk. -The deployed service packages are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent deployed application and its parent application must be included in the cluster health chunk. -One filter can match zero, one or multiple deployed service packages, depending on its properties. - - :param service_manifest_name_filter: The name of the service manifest which identifies the - deployed service packages that matches the filter. - If specified, the filter is applied only to the specified deployed service packages, if any. - If no deployed service packages with specified manifest name exist, nothing is returned in the - cluster health chunk based on this filter. - If any deployed service package exists, they are included in the cluster health chunk if it - respects the other filter properties. 
- If not specified, all deployed service packages that match the parent filters (if any) are - taken into consideration and matched against the other filter members, like health state - filter. +class DeployedServicePackageHealthStateFilter(Model): + """Defines matching criteria to determine whether a deployed service package + should be included as a child of a deployed application in the cluster + health chunk. + The deployed service packages are only returned if the parent entities + match a filter specified in the cluster health chunk query description. The + parent deployed application and its parent application must be included in + the cluster health chunk. + One filter can match zero, one or multiple deployed service packages, + depending on its properties. + + :param service_manifest_name_filter: The name of the service manifest + which identifies the deployed service packages that matches the filter. + If specified, the filter is applied only to the specified deployed service + packages, if any. + If no deployed service packages with specified manifest name exist, + nothing is returned in the cluster health chunk based on this filter. + If any deployed service package exists, they are included in the cluster + health chunk if it respects the other filter properties. + If not specified, all deployed service packages that match the parent + filters (if any) are taken into consideration and matched against the + other filter members, like health state filter. :type service_manifest_name_filter: str - :param service_package_activation_id_filter: The activation ID of a deployed service package - that matches the filter. - If not specified, the filter applies to all deployed service packages that match the other - parameters. - If specified, the filter matches only the deployed service package with the specified - activation ID. + :param service_package_activation_id_filter: The activation ID of a + deployed service package that matches the filter. 
+ If not specified, the filter applies to all deployed service packages that + match the other parameters. + If specified, the filter matches only the deployed service package with + the specified activation ID. :type service_package_activation_id_filter: str - :param health_state_filter: The filter for the health state of the deployed service packages. - It allows selecting deployed service packages if they match the desired health states. - The possible values are integer value of one of the following health states. Only deployed - service packages that match the filter are returned. All deployed service packages are used to - evaluate the parent deployed application aggregated health state. - If not specified, default value is None, unless the deployed service package ID is specified. - If the filter has default value and deployed service package ID is specified, the matching - deployed service package is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches deployed service packages with HealthState - value of OK (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + deployed service packages. It allows selecting deployed service packages + if they match the desired health states. 
+ The possible values are integer value of one of the following health + states. Only deployed service packages that match the filter are returned. + All deployed service packages are used to evaluate the parent deployed + application aggregated health state. + If not specified, default value is None, unless the deployed service + package ID is specified. If the filter has default value and deployed + service package ID is specified, the matching deployed service package is + returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed service + packages with HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . 
:type health_state_filter: int """ @@ -10283,35 +8501,32 @@ class DeployedServicePackageHealthStateFilter(msrest.serialization.Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__( - self, - *, - service_manifest_name_filter: Optional[str] = None, - service_package_activation_id_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - **kwargs - ): + def __init__(self, *, service_manifest_name_filter: str=None, service_package_activation_id_filter: str=None, health_state_filter: int=0, **kwargs) -> None: super(DeployedServicePackageHealthStateFilter, self).__init__(**kwargs) self.service_manifest_name_filter = service_manifest_name_filter self.service_package_activation_id_filter = service_package_activation_id_filter self.health_state_filter = health_state_filter -class DeployedServicePackageInfo(msrest.serialization.Model): +class DeployedServicePackageInfo(Model): """Information about service package deployed on a Service Fabric node. - :param name: The name of the service package as specified in the service manifest. + :param name: The name of the service package as specified in the service + manifest. :type name: str - :param version: The version of the service package specified in service manifest. + :param version: The version of the service package specified in service + manifest. :type version: str - :param status: Specifies the status of a deployed application or service package on a Service - Fabric node. Possible values include: "Invalid", "Downloading", "Activating", "Active", - "Upgrading", "Deactivating", "RanToCompletion", "Failed". + :param status: Specifies the status of a deployed application or service + package on a Service Fabric node. 
Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating', + 'RanToCompletion', 'Failed' :type status: str or ~azure.servicefabric.models.DeploymentStatus - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -10323,15 +8538,7 @@ class DeployedServicePackageInfo(msrest.serialization.Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - version: Optional[str] = None, - status: Optional[Union[str, "DeploymentStatus"]] = None, - service_package_activation_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, version: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None: super(DeployedServicePackageInfo, self).__init__(**kwargs) self.name = name self.version = version @@ -10344,50 +8551,33 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param application_id: Required. The identity of the application. This is an encoded - representation of the application name. This is used in the REST APIs to identify the - application resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the application name is "fabric:/myapp/app1", - the application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. :type application_id: str :param service_manifest_name: Required. Service manifest name. :type service_manifest_name: str - :param service_package_instance_id: Required. Id of Service package instance. + :param service_package_instance_id: Required. Id of Service package + instance. :type service_package_instance_id: long - :param service_package_activation_id: Required. Id of Service package activation. + :param service_package_activation_id: Required. Id of Service package + activation. :type service_package_activation_id: str :param node_name: Required. 
The name of a Service Fabric node. :type node_name: str @@ -10403,16 +8593,17 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'application_id': {'required': True}, 'service_manifest_name': {'required': True}, 'service_package_instance_id': {'required': True}, @@ -10429,11 +8620,11 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_id': {'key': 'ApplicationId', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, @@ -10449,30 +8640,8 @@ class DeployedServicePackageNewHealthReportEvent(ApplicationEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - application_id: str, - service_manifest_name: str, - service_package_instance_id: int, - service_package_activation_id: str, - node_name: str, - source_id: str, - property: str, - health_state: str, - 
time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_manifest_name: str, service_package_instance_id: int, service_package_activation_id: str, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(DeployedServicePackageNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) - self.kind = 'DeployedServicePackageNewHealthReport' # type: str self.service_manifest_name = service_manifest_name self.service_package_instance_id = service_package_instance_id self.service_package_activation_id = service_package_activation_id @@ -10485,37 +8654,38 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedServicePackageNewHealthReport' class DeployedServicePackagesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for deployed service packages, containing health evaluations for each unhealthy deployed service package that impacted current aggregated health state. Can be returned when evaluating deployed application health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. 
This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for deployed service packages, containing + health evaluations for each unhealthy deployed service package that + impacted current aggregated health state. Can be returned when evaluating + deployed application health and the aggregated health state is either Error + or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. 
:type description: str - :param total_count: Total number of deployed service packages of the deployed application in - the health store. + :param kind: Required. Constant filled by server. + :type kind: str + :param total_count: Total number of deployed service packages of the + deployed application in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy DeployedServicePackageHealthEvaluation that impacted the - aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + DeployedServicePackageHealthEvaluation that impacted the aggregated + health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -10523,56 +8693,52 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(DeployedServicePackagesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, 
**kwargs) - self.kind = 'DeployedServicePackages' # type: str self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedServicePackages' -class DeployedServiceReplicaDetailInfo(msrest.serialization.Model): +class DeployedServiceReplicaDetailInfo(Model): """Information about a Service Fabric service replica deployed on a node. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeployedStatefulServiceReplicaDetailInfo, DeployedStatelessServiceInstanceDetailInfo. + sub-classes are: DeployedStatefulServiceReplicaDetailInfo, + DeployedStatelessServiceInstanceDetailInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: Full hierarchical name of the service in URI format starting with - ``fabric:``. + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. 
:type partition_id: str - :param current_service_operation: Specifies the current active life-cycle operation on a - stateful service replica or stateless service instance. Possible values include: "Unknown", - "None", "Open", "ChangeRole", "Close", "Abort". - :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the current service - operation in UTC format. - :type current_service_operation_start_time_utc: ~datetime.datetime + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. + Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime :param reported_load: List of load reported by replica. - :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -10580,77 +8746,75 @@ class DeployedServiceReplicaDetailInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaDetailInfo', 'Stateless': 'DeployedStatelessServiceInstanceDetailInfo'} } - def __init__( - self, - *, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - current_service_operation: Optional[Union[str, "ServiceOperationName"]] = None, - current_service_operation_start_time_utc: Optional[datetime.datetime] = None, - reported_load: Optional[List["LoadMetricReportInfo"]] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, **kwargs) -> None: super(DeployedServiceReplicaDetailInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.service_name = service_name self.partition_id = partition_id self.current_service_operation = current_service_operation self.current_service_operation_start_time_utc = current_service_operation_start_time_utc self.reported_load = reported_load + self.service_kind = None -class DeployedServiceReplicaInfo(msrest.serialization.Model): +class DeployedServiceReplicaInfo(Model): """Information about a Service Fabric service replica deployed on a node. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: DeployedStatefulServiceReplicaInfo, DeployedStatelessServiceInstanceInfo. + sub-classes are: DeployedStatefulServiceReplicaInfo, + DeployedStatelessServiceInstanceInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this replica. + :param code_package_name: The name of the code package that hosts this + replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. 
If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or ChangeRole. + :param address: The last address returned by the replica in Open or + ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the replica. This will - be zero if the replica is down. In hyper-v containers this host process ID will be from - different kernel. + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. :type host_process_id: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -10658,7 +8822,6 @@ class DeployedServiceReplicaInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -10668,28 +8831,15 @@ class DeployedServiceReplicaInfo(msrest.serialization.Model): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'} } - def __init__( - self, - *, - service_name: Optional[str] = None, - service_type_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - code_package_name: Optional[str] = None, - partition_id: Optional[str] = None, - replica_status: Optional[Union[str, "ReplicaStatus"]] = None, - address: Optional[str] = None, - service_package_activation_id: Optional[str] = None, - host_process_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, **kwargs) -> None: super(DeployedServiceReplicaInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.service_name = service_name self.service_type_name = service_type_name self.service_manifest_name = service_manifest_name @@ -10699,25 +8849,31 @@ def __init__( self.address = address self.service_package_activation_id = service_package_activation_id self.host_process_id 
= host_process_id + self.service_kind = None -class DeployedServiceTypeInfo(msrest.serialization.Model): - """Information about service type deployed on a node, information such as the status of the service type registration on a node. +class DeployedServiceTypeInfo(Model): + """Information about service type deployed on a node, information such as the + status of the service type registration on a node. - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that registered the service type. + :param code_package_name: The name of the code package that registered the + service type. :type code_package_name: str - :param status: The status of the service type registration on the node. Possible values - include: "Invalid", "Disabled", "Enabled", "Registered". - :type status: str or ~azure.servicefabric.models.ServiceTypeRegistrationStatus - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param status: The status of the service type registration on the node. + Possible values include: 'Invalid', 'Disabled', 'Enabled', 'Registered' + :type status: str or + ~azure.servicefabric.models.ServiceTypeRegistrationStatus + :param service_package_activation_id: The ActivationId of a deployed + service package. 
If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ @@ -10730,16 +8886,7 @@ class DeployedServiceTypeInfo(msrest.serialization.Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__( - self, - *, - service_type_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - code_package_name: Optional[str] = None, - status: Optional[Union[str, "ServiceTypeRegistrationStatus"]] = None, - service_package_activation_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None: super(DeployedServiceTypeInfo, self).__init__(**kwargs) self.service_type_name = service_type_name self.service_manifest_name = service_manifest_name @@ -10749,54 +8896,71 @@ def __init__( class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateful replica running in a code package. Note DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and replicaId. + """Information about a stateful replica running in a code package. Note + DeployedServiceReplicaQueryResult will contain duplicate data like + ServiceKind, ServiceName, PartitionId and replicaId. All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". 
- :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: Full hierarchical name of the service in URI format starting with - ``fabric:``. + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle operation on a - stateful service replica or stateless service instance. Possible values include: "Unknown", - "None", "Open", "ChangeRole", "Close", "Abort". - :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the current service - operation in UTC format. - :type current_service_operation_start_time_utc: ~datetime.datetime + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. + Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. 
+ :type current_service_operation_start_time_utc: datetime :param reported_load: List of load reported by replica. - :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str - :param current_replicator_operation: Specifies the operation currently being executed by the - Replicator. Possible values include: "Invalid", "None", "Open", "ChangeRole", "UpdateEpoch", - "Close", "Abort", "OnDataLoss", "WaitForCatchup", "Build". - :type current_replicator_operation: str or ~azure.servicefabric.models.ReplicatorOperationName - :param read_status: Specifies the access status of the partition. Possible values include: - "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". - :type read_status: str or ~azure.servicefabric.models.PartitionAccessStatus - :param write_status: Specifies the access status of the partition. 
Possible values include: - "Invalid", "Granted", "ReconfigurationPending", "NotPrimary", "NoWriteQuorum". - :type write_status: str or ~azure.servicefabric.models.PartitionAccessStatus - :param replicator_status: Represents a base class for primary or secondary replicator status. - Contains information about the service fabric replicator like the replication/copy queue - utilization, last acknowledgement received timestamp, etc. + :param current_replicator_operation: Specifies the operation currently + being executed by the Replicator. Possible values include: 'Invalid', + 'None', 'Open', 'ChangeRole', 'UpdateEpoch', 'Close', 'Abort', + 'OnDataLoss', 'WaitForCatchup', 'Build' + :type current_replicator_operation: str or + ~azure.servicefabric.models.ReplicatorOperationName + :param read_status: Specifies the access status of the partition. Possible + values include: 'Invalid', 'Granted', 'ReconfigurationPending', + 'NotPrimary', 'NoWriteQuorum' + :type read_status: str or + ~azure.servicefabric.models.PartitionAccessStatus + :param write_status: Specifies the access status of the partition. + Possible values include: 'Invalid', 'Granted', 'ReconfigurationPending', + 'NotPrimary', 'NoWriteQuorum' + :type write_status: str or + ~azure.servicefabric.models.PartitionAccessStatus + :param replicator_status: Represents a base class for primary or secondary + replicator status. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. :type replicator_status: ~azure.servicefabric.models.ReplicatorStatus - :param replica_status: Key value store related information for the replica. - :type replica_status: ~azure.servicefabric.models.KeyValueStoreReplicaStatus - :param deployed_service_replica_query_result: Information about a stateful service replica - deployed on a node. + :param replica_status: Key value store related information for the + replica. 
+ :type replica_status: + ~azure.servicefabric.models.KeyValueStoreReplicaStatus + :param deployed_service_replica_query_result: Information about a stateful + service replica deployed on a node. :type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatefulServiceReplicaInfo """ @@ -10806,12 +8970,12 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'current_replicator_operation': {'key': 'CurrentReplicatorOperation', 'type': 'str'}, 'read_status': {'key': 'ReadStatus', 'type': 'str'}, @@ -10821,25 +8985,8 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatefulServiceReplicaInfo'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - current_service_operation: Optional[Union[str, "ServiceOperationName"]] = None, - current_service_operation_start_time_utc: Optional[datetime.datetime] = None, - reported_load: Optional[List["LoadMetricReportInfo"]] = None, - replica_id: Optional[str] = None, - current_replicator_operation: Optional[Union[str, "ReplicatorOperationName"]] = None, - read_status: Optional[Union[str, "PartitionAccessStatus"]] = None, - write_status: Optional[Union[str, "PartitionAccessStatus"]] = None, - replicator_status: 
Optional["ReplicatorStatus"] = None, - replica_status: Optional["KeyValueStoreReplicaStatus"] = None, - deployed_service_replica_query_result: Optional["DeployedStatefulServiceReplicaInfo"] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, replica_id: str=None, current_replicator_operation=None, read_status=None, write_status=None, replicator_status=None, replica_status=None, deployed_service_replica_query_result=None, **kwargs) -> None: super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load, **kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = replica_id self.current_replicator_operation = current_replicator_operation self.read_status = read_status @@ -10847,6 +8994,7 @@ def __init__( self.replicator_status = replicator_status self.replica_status = replica_status self.deployed_service_replica_query_result = deployed_service_replica_query_result + self.service_kind = 'Stateful' class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): @@ -10854,50 +9002,61 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param service_name: The full name of the service with 'fabric:' URI + scheme. 
:type service_name: str - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this replica. + :param code_package_name: The name of the code package that hosts this + replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or ChangeRole. + :param address: The last address returned by the replica in Open or + ChangeRole. 
:type address: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the replica. This will - be zero if the replica is down. In hyper-v containers this host process ID will be from - different kernel. + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. :type host_process_id: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. 
If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str - :param replica_role: The role of a replica of a stateful service. Possible values include: - "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :param replica_role: The role of a replica of a stateful service. Possible + values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', + 'ActiveSecondary' :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_information: Information about current reconfiguration like phase, type, - previous configuration role of replica and reconfiguration start date time. - :type reconfiguration_information: ~azure.servicefabric.models.ReconfigurationInformation + :param reconfiguration_information: Information about current + reconfiguration like phase, type, previous configuration role of replica + and reconfiguration start date time. 
+ :type reconfiguration_information: + ~azure.servicefabric.models.ReconfigurationInformation """ _validation = { @@ -10905,7 +9064,6 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -10915,67 +9073,58 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'reconfiguration_information': {'key': 'ReconfigurationInformation', 'type': 'ReconfigurationInformation'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - service_type_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - code_package_name: Optional[str] = None, - partition_id: Optional[str] = None, - replica_status: Optional[Union[str, "ReplicaStatus"]] = None, - address: Optional[str] = None, - service_package_activation_id: Optional[str] = None, - host_process_id: Optional[str] = None, - replica_id: Optional[str] = None, - replica_role: Optional[Union[str, "ReplicaRole"]] = None, - reconfiguration_information: Optional["ReconfigurationInformation"] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, replica_id: str=None, replica_role=None, 
reconfiguration_information=None, **kwargs) -> None: super(DeployedStatefulServiceReplicaInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id, **kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = replica_id self.replica_role = replica_role self.reconfiguration_information = reconfiguration_information + self.service_kind = 'Stateful' class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInfo): - """Information about a stateless instance running in a code package. Note that DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and InstanceId. + """Information about a stateless instance running in a code package. Note that + DeployedServiceReplicaQueryResult will contain duplicate data like + ServiceKind, ServiceName, PartitionId and InstanceId. All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: Full hierarchical name of the service in URI format starting with - ``fabric:``. + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. :type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. 
If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param current_service_operation: Specifies the current active life-cycle operation on a - stateful service replica or stateless service instance. Possible values include: "Unknown", - "None", "Open", "ChangeRole", "Close", "Abort". - :type current_service_operation: str or ~azure.servicefabric.models.ServiceOperationName - :param current_service_operation_start_time_utc: The start time of the current service - operation in UTC format. - :type current_service_operation_start_time_utc: ~datetime.datetime + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. + Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime :param reported_load: List of load reported by replica. - :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. 
+ :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. :type instance_id: str - :param deployed_service_replica_query_result: Information about a stateless service instance - deployed on a node. + :param deployed_service_replica_query_result: Information about a + stateless service instance deployed on a node. :type deployed_service_replica_query_result: ~azure.servicefabric.models.DeployedStatelessServiceInstanceInfo """ @@ -10985,32 +9134,21 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatelessServiceInstanceInfo'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - current_service_operation: Optional[Union[str, "ServiceOperationName"]] = None, - current_service_operation_start_time_utc: Optional[datetime.datetime] = None, - 
reported_load: Optional[List["LoadMetricReportInfo"]] = None, - instance_id: Optional[str] = None, - deployed_service_replica_query_result: Optional["DeployedStatelessServiceInstanceInfo"] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, instance_id: str=None, deployed_service_replica_query_result=None, **kwargs) -> None: super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load, **kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = instance_id self.deployed_service_replica_query_result = deployed_service_replica_query_result + self.service_kind = 'Stateless' class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): @@ -11018,42 +9156,49 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param service_manifest_name: The name of the service manifest in which this service type is - defined. 
+ :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param code_package_name: The name of the code package that hosts this replica. + :param code_package_name: The name of the code package that hosts this + replica. :type code_package_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param address: The last address returned by the replica in Open or ChangeRole. + :param address: The last address returned by the replica in Open or + ChangeRole. :type address: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. 
If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process ID of the process that is hosting the replica. This will - be zero if the replica is down. In hyper-v containers this host process ID will be from - different kernel. + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. :type host_process_id: str - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. 
:type instance_id: str """ @@ -11062,7 +9207,6 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, @@ -11072,47 +9216,36 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): 'address': {'key': 'Address', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - service_type_name: Optional[str] = None, - service_manifest_name: Optional[str] = None, - code_package_name: Optional[str] = None, - partition_id: Optional[str] = None, - replica_status: Optional[Union[str, "ReplicaStatus"]] = None, - address: Optional[str] = None, - service_package_activation_id: Optional[str] = None, - host_process_id: Optional[str] = None, - instance_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, instance_id: str=None, **kwargs) -> None: super(DeployedStatelessServiceInstanceInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id, **kwargs) - self.service_kind = 
'Stateless' # type: str self.instance_id = instance_id + self.service_kind = 'Stateless' -class DeployServicePackageToNodeDescription(msrest.serialization.Model): - """Defines description for downloading packages associated with a service manifest to image cache on a Service Fabric node. +class DeployServicePackageToNodeDescription(Model): + """Defines description for downloading packages associated with a service + manifest to image cache on a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest whose packages need to be - downloaded. + :param service_manifest_name: Required. The name of service manifest whose + packages need to be downloaded. :type service_manifest_name: str - :param application_type_name: Required. The application type name as defined in the application - manifest. + :param application_type_name: Required. The application type name as + defined in the application manifest. :type application_type_name: str - :param application_type_version: Required. The version of the application type as defined in - the application manifest. + :param application_type_version: Required. The version of the application + type as defined in the application manifest. :type application_type_version: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param package_sharing_policy: List of package sharing policy information. 
- :type package_sharing_policy: list[~azure.servicefabric.models.PackageSharingPolicyInfo] + :type package_sharing_policy: + list[~azure.servicefabric.models.PackageSharingPolicyInfo] """ _validation = { @@ -11130,16 +9263,7 @@ class DeployServicePackageToNodeDescription(msrest.serialization.Model): 'package_sharing_policy': {'key': 'PackageSharingPolicy', 'type': '[PackageSharingPolicyInfo]'}, } - def __init__( - self, - *, - service_manifest_name: str, - application_type_name: str, - application_type_version: str, - node_name: str, - package_sharing_policy: Optional[List["PackageSharingPolicyInfo"]] = None, - **kwargs - ): + def __init__(self, *, service_manifest_name: str, application_type_name: str, application_type_version: str, node_name: str, package_sharing_policy=None, **kwargs) -> None: super(DeployServicePackageToNodeDescription, self).__init__(**kwargs) self.service_manifest_name = service_manifest_name self.application_type_name = application_type_name @@ -11148,15 +9272,15 @@ def __init__( self.package_sharing_policy = package_sharing_policy -class DiagnosticsDescription(msrest.serialization.Model): +class DiagnosticsDescription(Model): """Describes the diagnostics options available. :param sinks: List of supported sinks that can be referenced. :type sinks: list[~azure.servicefabric.models.DiagnosticsSinkProperties] :param enabled: Status of whether or not sinks are enabled. :type enabled: bool - :param default_sink_refs: The sinks to be used if diagnostics is enabled. Sink choices can be - overridden at the service and code package level. + :param default_sink_refs: The sinks to be used if diagnostics is enabled. + Sink choices can be overridden at the service and code package level. 
:type default_sink_refs: list[str] """ @@ -11166,27 +9290,20 @@ class DiagnosticsDescription(msrest.serialization.Model): 'default_sink_refs': {'key': 'defaultSinkRefs', 'type': '[str]'}, } - def __init__( - self, - *, - sinks: Optional[List["DiagnosticsSinkProperties"]] = None, - enabled: Optional[bool] = None, - default_sink_refs: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, sinks=None, enabled: bool=None, default_sink_refs=None, **kwargs) -> None: super(DiagnosticsDescription, self).__init__(**kwargs) self.sinks = sinks self.enabled = enabled self.default_sink_refs = default_sink_refs -class DiagnosticsRef(msrest.serialization.Model): +class DiagnosticsRef(Model): """Reference to sinks in DiagnosticsDescription. :param enabled: Status of whether or not sinks are enabled. :type enabled: bool - :param sink_refs: List of sinks to be used if enabled. References the list of sinks in - DiagnosticsDescription. + :param sink_refs: List of sinks to be used if enabled. References the list + of sinks in DiagnosticsDescription. :type sink_refs: list[str] """ @@ -11195,26 +9312,21 @@ class DiagnosticsRef(msrest.serialization.Model): 'sink_refs': {'key': 'sinkRefs', 'type': '[str]'}, } - def __init__( - self, - *, - enabled: Optional[bool] = None, - sink_refs: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, enabled: bool=None, sink_refs=None, **kwargs) -> None: super(DiagnosticsRef, self).__init__(**kwargs) self.enabled = enabled self.sink_refs = sink_refs -class DisableBackupDescription(msrest.serialization.Model): - """It describes the body parameters while disabling backup of a backup entity(Application/Service/Partition). +class DisableBackupDescription(Model): + """It describes the body parameters while disabling backup of a backup + entity(Application/Service/Partition). All required parameters must be populated in order to send to Azure. - :param clean_backup: Required. Boolean flag to delete backups. 
It can be set to true for - deleting all the backups which were created for the backup entity that is getting disabled for - backup. + :param clean_backup: Required. Boolean flag to delete backups. It can be + set to true for deleting all the backups which were created for the backup + entity that is getting disabled for backup. :type clean_backup: bool """ @@ -11226,22 +9338,17 @@ class DisableBackupDescription(msrest.serialization.Model): 'clean_backup': {'key': 'CleanBackup', 'type': 'bool'}, } - def __init__( - self, - *, - clean_backup: bool, - **kwargs - ): + def __init__(self, *, clean_backup: bool, **kwargs) -> None: super(DisableBackupDescription, self).__init__(**kwargs) self.clean_backup = clean_backup -class DiskInfo(msrest.serialization.Model): +class DiskInfo(Model): """Information about the disk. - :param capacity: the disk size in bytes. + :param capacity: the disk size in bytes :type capacity: str - :param available_space: the available disk space in bytes. + :param available_space: the available disk space in bytes :type available_space: str """ @@ -11250,13 +9357,7 @@ class DiskInfo(msrest.serialization.Model): 'available_space': {'key': 'AvailableSpace', 'type': 'str'}, } - def __init__( - self, - *, - capacity: Optional[str] = None, - available_space: Optional[str] = None, - **kwargs - ): + def __init__(self, *, capacity: str=None, available_space: str=None, **kwargs) -> None: super(DiskInfo, self).__init__(**kwargs) self.capacity = capacity self.available_space = available_space @@ -11267,10 +9368,8 @@ class DoublePropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. 
Constant filled by server. + :type kind: str :param data: Required. The data of the property value. :type data: float """ @@ -11285,33 +9384,27 @@ class DoublePropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'float'}, } - def __init__( - self, - *, - data: float, - **kwargs - ): + def __init__(self, *, data: float, **kwargs) -> None: super(DoublePropertyValue, self).__init__(**kwargs) - self.kind = 'Double' # type: str self.data = data + self.kind = 'Double' class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Dsms Azure blob store used for storing and enumerating backups. + """Describes the parameters for Dsms Azure blob store used for storing and + enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param storage_credentials_source_location: Required. The source location of the storage - credentials to connect to the Dsms Azure blob store. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param storage_credentials_source_location: Required. The source location + of the storage credentials to connect to the Dsms Azure blob store. :type storage_credentials_source_location: str - :param container_name: Required. The name of the container in the blob store to store and - enumerate backups from. + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. 
:type container_name: str """ @@ -11322,33 +9415,26 @@ class DsmsAzureBlobBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'storage_credentials_source_location': {'key': 'StorageCredentialsSourceLocation', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__( - self, - *, - storage_credentials_source_location: str, - container_name: str, - friendly_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, storage_credentials_source_location: str, container_name: str, friendly_name: str=None, **kwargs) -> None: super(DsmsAzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) - self.storage_kind = 'DsmsAzureBlobStore' # type: str self.storage_credentials_source_location = storage_credentials_source_location self.container_name = container_name + self.storage_kind = 'DsmsAzureBlobStore' -class EnableBackupDescription(msrest.serialization.Model): +class EnableBackupDescription(Model): """Specifies the parameters needed to enable periodic backup. All required parameters must be populated in order to send to Azure. - :param backup_policy_name: Required. Name of the backup policy to be used for enabling periodic - backups. + :param backup_policy_name: Required. Name of the backup policy to be used + for enabling periodic backups. 
:type backup_policy_name: str """ @@ -11360,17 +9446,12 @@ class EnableBackupDescription(msrest.serialization.Model): 'backup_policy_name': {'key': 'BackupPolicyName', 'type': 'str'}, } - def __init__( - self, - *, - backup_policy_name: str, - **kwargs - ): + def __init__(self, *, backup_policy_name: str, **kwargs) -> None: super(EnableBackupDescription, self).__init__(**kwargs) self.backup_policy_name = backup_policy_name -class EndpointProperties(msrest.serialization.Model): +class EndpointProperties(Model): """Describes a container endpoint. All required parameters must be populated in order to send to Azure. @@ -11390,19 +9471,13 @@ class EndpointProperties(msrest.serialization.Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__( - self, - *, - name: str, - port: Optional[int] = None, - **kwargs - ): + def __init__(self, *, name: str, port: int=None, **kwargs) -> None: super(EndpointProperties, self).__init__(**kwargs) self.name = name self.port = port -class EndpointRef(msrest.serialization.Model): +class EndpointRef(Model): """Describes a reference to a service endpoint. :param name: Name of the endpoint. @@ -11413,30 +9488,23 @@ class EndpointRef(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, **kwargs) -> None: super(EndpointRef, self).__init__(**kwargs) self.name = name -class SafetyCheck(msrest.serialization.Model): - """Represents a safety check performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. +class SafetyCheck(Model): + """Represents a safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service + and the reliability of the state. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: SeedNodeSafetyCheck, PartitionSafetyCheck. + sub-classes are: PartitionSafetyCheck, SeedNodeSafetyCheck All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -11448,32 +9516,30 @@ class SafetyCheck(msrest.serialization.Model): } _subtype_map = { - 'kind': {'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck', 'PartitionSafetyCheck': 'PartitionSafetyCheck'} + 'kind': {'PartitionSafetyCheck': 'PartitionSafetyCheck', 'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(SafetyCheck, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class PartitionSafetyCheck(SafetyCheck): - """Represents a safety check for the service partition being performed by service fabric before continuing with operations. + """Represents a safety check for the service partition being performed by + service fabric before continuing with operations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: EnsureAvailabilitySafetyCheck, EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, WaitForReconfigurationSafetyCheck. 
+ sub-classes are: EnsureAvailabilitySafetyCheck, + EnsurePartitionQuorumSafetyCheck, WaitForInbuildReplicaSafetyCheck, + WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, + WaitForReconfigurationSafetyCheck All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -11490,29 +9556,23 @@ class PartitionSafetyCheck(SafetyCheck): 'kind': {'EnsureAvailability': 'EnsureAvailabilitySafetyCheck', 'EnsurePartitionQuorum': 'EnsurePartitionQuorumSafetyCheck', 'WaitForInbuildReplica': 'WaitForInbuildReplicaSafetyCheck', 'WaitForPrimaryPlacement': 'WaitForPrimaryPlacementSafetyCheck', 'WaitForPrimarySwap': 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfiguration': 'WaitForReconfigurationSafetyCheck'} } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(PartitionSafetyCheck, self).__init__(**kwargs) - self.kind = 'PartitionSafetyCheck' # type: str self.partition_id = partition_id + self.kind = 'PartitionSafetyCheck' class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): - """Safety check that waits to ensure the availability of the partition. 
It waits until there are replicas available such that bringing down this replica will not cause availability loss for the partition. + """Safety check that waits to ensure the availability of the partition. It + waits until there are replicas available such that bringing down this + replica will not cause availability loss for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -11525,28 +9585,21 @@ class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(EnsureAvailabilitySafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'EnsureAvailability' # type: str + self.kind = 'EnsureAvailability' class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): - """Safety check that ensures that a quorum of replicas are not lost for a partition. + """Safety check that ensures that a quorum of replicas are not lost for a + partition. All required parameters must be populated in order to send to Azure. 
- :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -11559,24 +9612,21 @@ class EnsurePartitionQuorumSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(EnsurePartitionQuorumSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'EnsurePartitionQuorum' # type: str + self.kind = 'EnsurePartitionQuorum' -class EntityKindHealthStateCount(msrest.serialization.Model): +class EntityKindHealthStateCount(Model): """Represents health state count for entities of the specified entity kind. - :param entity_kind: The entity kind for which health states are evaluated. Possible values - include: "Invalid", "Node", "Partition", "Service", "Application", "Replica", - "DeployedApplication", "DeployedServicePackage", "Cluster". + :param entity_kind: The entity kind for which health states are evaluated. 
+ Possible values include: 'Invalid', 'Node', 'Partition', 'Service', + 'Application', 'Replica', 'DeployedApplication', 'DeployedServicePackage', + 'Cluster' :type entity_kind: str or ~azure.servicefabric.models.EntityKind - :param health_state_count: The health state count for the entities of the specified kind. + :param health_state_count: The health state count for the entities of the + specified kind. :type health_state_count: ~azure.servicefabric.models.HealthStateCount """ @@ -11585,28 +9635,23 @@ class EntityKindHealthStateCount(msrest.serialization.Model): 'health_state_count': {'key': 'HealthStateCount', 'type': 'HealthStateCount'}, } - def __init__( - self, - *, - entity_kind: Optional[Union[str, "EntityKind"]] = None, - health_state_count: Optional["HealthStateCount"] = None, - **kwargs - ): + def __init__(self, *, entity_kind=None, health_state_count=None, **kwargs) -> None: super(EntityKindHealthStateCount, self).__init__(**kwargs) self.entity_kind = entity_kind self.health_state_count = health_state_count -class EnvironmentVariable(msrest.serialization.Model): +class EnvironmentVariable(Model): """Describes an environment variable for the container. - :param type: The type of the environment variable being given in value. Possible values - include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". + :param type: The type of the environment variable being given in value. + Possible values include: 'ClearText', 'KeyVaultReference', + 'SecretValueReference'. Default value: "ClearText" . :type type: str or ~azure.servicefabric.models.EnvironmentVariableType :param name: The name of the environment variable. :type name: str - :param value: The value of the environment variable, will be processed based on the type - provided. + :param value: The value of the environment variable, will be processed + based on the type provided. 
:type value: str """ @@ -11616,30 +9661,28 @@ class EnvironmentVariable(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - *, - type: Optional[Union[str, "EnvironmentVariableType"]] = "ClearText", - name: Optional[str] = None, - value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, type="ClearText", name: str=None, value: str=None, **kwargs) -> None: super(EnvironmentVariable, self).__init__(**kwargs) self.type = type self.name = name self.value = value -class Epoch(msrest.serialization.Model): - """An Epoch is a configuration number for the partition as a whole. When the configuration of the replica set changes, for example when the Primary replica changes, the operations that are replicated from the new Primary replica are said to be a new Epoch from the ones which were sent by the old Primary replica. +class Epoch(Model): + """An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary + replica changes, the operations that are replicated from the new Primary + replica are said to be a new Epoch from the ones which were sent by the old + Primary replica. - :param configuration_version: The current configuration number of this Epoch. The configuration - number is an increasing value that is updated whenever the configuration of this replica set - changes. + :param configuration_version: The current configuration number of this + Epoch. The configuration number is an increasing value that is updated + whenever the configuration of this replica set changes. :type configuration_version: str - :param data_loss_version: The current data loss number of this Epoch. The data loss number - property is an increasing value which is updated whenever data loss is suspected, as when loss - of a quorum of replicas in the replica set that includes the Primary replica. 
+ :param data_loss_version: The current data loss number of this Epoch. The + data loss number property is an increasing value which is updated whenever + data loss is suspected, as when loss of a quorum of replicas in the + replica set that includes the Primary replica. :type data_loss_version: str """ @@ -11648,46 +9691,38 @@ class Epoch(msrest.serialization.Model): 'data_loss_version': {'key': 'DataLossVersion', 'type': 'str'}, } - def __init__( - self, - *, - configuration_version: Optional[str] = None, - data_loss_version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, configuration_version: str=None, data_loss_version: str=None, **kwargs) -> None: super(Epoch, self).__init__(**kwargs) self.configuration_version = configuration_version self.data_loss_version = data_loss_version class EventHealthEvaluation(HealthEvaluation): - """Represents health evaluation of a HealthEvent that was reported on the entity. -The health evaluation is returned when evaluating health of an entity results in Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation of a HealthEvent that was reported on the + entity. + The health evaluation is returned when evaluating health of an entity + results in Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param consider_warning_as_error: Indicates whether warnings are treated with the same severity - as errors. The field is specified in the health policy used to evaluate the entity. + :param kind: Required. Constant filled by server. + :type kind: str + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. The field is specified in the health + policy used to evaluate the entity. :type consider_warning_as_error: bool - :param unhealthy_event: Represents health information reported on a health entity, such as - cluster, application or node, with additional metadata added by the Health Manager. 
+ :param unhealthy_event: Represents health information reported on a health + entity, such as cluster, application or node, with additional metadata + added by the Health Manager. :type unhealthy_event: ~azure.servicefabric.models.HealthEvent """ @@ -11696,173 +9731,152 @@ class EventHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, 'unhealthy_event': {'key': 'UnhealthyEvent', 'type': 'HealthEvent'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - consider_warning_as_error: Optional[bool] = None, - unhealthy_event: Optional["HealthEvent"] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, consider_warning_as_error: bool=None, unhealthy_event=None, **kwargs) -> None: super(EventHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Event' # type: str self.consider_warning_as_error = consider_warning_as_error self.unhealthy_event = unhealthy_event + self.kind = 'Event' class ExecutingFaultsChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings. + """Describes a Chaos event that gets generated when Chaos has decided on the + faults for an iteration. This Chaos event contains the details of the + faults as a list of strings. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. 
Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param faults: List of string description of the faults that Chaos decided to execute in an - iteration. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param faults: List of string description of the faults that Chaos decided + to execute in an iteration. :type faults: list[str] """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'faults': {'key': 'Faults', 'type': '[str]'}, } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - faults: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, time_stamp_utc, faults=None, **kwargs) -> None: super(ExecutingFaultsChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) - self.kind = 'ExecutingFaults' # type: str self.faults = faults + self.kind = 'ExecutingFaults' -class ProvisionApplicationTypeDescriptionBase(msrest.serialization.Model): - """Represents the type of registration or provision requested, and if the operation needs to be asynchronous or not. Supported types of provision operations are from either image store or external store. +class ProvisionApplicationTypeDescriptionBase(Model): + """Represents the type of registration or provision requested, and if the + operation needs to be asynchronous or not. 
Supported types of provision + operations are from either image store or external store. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ExternalStoreProvisionApplicationTypeDescription, ProvisionApplicationTypeDescription. + sub-classes are: ProvisionApplicationTypeDescription, + ExternalStoreProvisionApplicationTypeDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of application type registration or provision requested. The - application package can be registered or provisioned either from the image store or from an - external store. Following are the kinds of the application type provision.Constant filled by - server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". - :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind - :param async_property: Required. Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the request is accepted - by the system, and the provision operation continues without any timeout limit. The default - value is false. For large application packages, we recommend setting the value to true. + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { - 'kind': {'required': True}, 'async_property': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { - 'kind': {'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription', 'ImageStorePath': 'ProvisionApplicationTypeDescription'} + 'kind': {'ImageStorePath': 'ProvisionApplicationTypeDescription', 'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription'} } - def __init__( - self, - *, - async_property: bool, - **kwargs - ): + def __init__(self, *, async_property: bool, **kwargs) -> None: super(ProvisionApplicationTypeDescriptionBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.async_property = async_property + self.kind = None class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to register or provision an application type using an application package from an external store instead of a package uploaded to the Service Fabric image store. + """Describes the operation to register or provision an application type using + an application package from an external store instead of a package uploaded + to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of application type registration or provision requested. The - application package can be registered or provisioned either from the image store or from an - external store. Following are the kinds of the application type provision.Constant filled by - server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". - :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind - :param async_property: Required. Indicates whether or not provisioning should occur - asynchronously. 
When set to true, the provision operation returns when the request is accepted - by the system, and the provision operation continues without any timeout limit. The default - value is false. For large application packages, we recommend setting the value to true. + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool - :param application_package_download_uri: Required. The path to the '.sfpkg' application package - from where the application package can be downloaded using HTTP or HTTPS protocols. The - application package can be stored in an external store that provides GET operation to download - the file. Supported protocols are HTTP and HTTPS, and the path must allow READ access. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_package_download_uri: Required. The path to the + '.sfpkg' application package from where the application package can be + downloaded using HTTP or HTTPS protocols. The application package can be + stored in an external store that provides GET operation to download the + file. Supported protocols are HTTP and HTTPS, and the path must allow READ + access. :type application_package_download_uri: str - :param application_type_name: Required. The application type name represents the name of the - application type found in the application manifest. + :param application_type_name: Required. The application type name + represents the name of the application type found in the application + manifest. :type application_type_name: str - :param application_type_version: Required. 
The application type version represents the version - of the application type found in the application manifest. + :param application_type_version: Required. The application type version + represents the version of the application type found in the application + manifest. :type application_type_version: str """ _validation = { - 'kind': {'required': True}, 'async_property': {'required': True}, + 'kind': {'required': True}, 'application_package_download_uri': {'required': True}, 'application_type_name': {'required': True}, 'application_type_version': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_package_download_uri': {'key': 'ApplicationPackageDownloadUri', 'type': 'str'}, 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__( - self, - *, - async_property: bool, - application_package_download_uri: str, - application_type_name: str, - application_type_version: str, - **kwargs - ): + def __init__(self, *, async_property: bool, application_package_download_uri: str, application_type_name: str, application_type_version: str, **kwargs) -> None: super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(async_property=async_property, **kwargs) - self.kind = 'ExternalStore' # type: str self.application_package_download_uri = application_package_download_uri self.application_type_name = application_type_name self.application_type_version = application_type_version + self.kind = 'ExternalStore' -class FabricCodeVersionInfo(msrest.serialization.Model): +class FabricCodeVersionInfo(Model): """Information about a Service Fabric code version. :param code_version: The product version of Service Fabric. 
@@ -11873,17 +9887,12 @@ class FabricCodeVersionInfo(msrest.serialization.Model): 'code_version': {'key': 'CodeVersion', 'type': 'str'}, } - def __init__( - self, - *, - code_version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, code_version: str=None, **kwargs) -> None: super(FabricCodeVersionInfo, self).__init__(**kwargs) self.code_version = code_version -class FabricConfigVersionInfo(msrest.serialization.Model): +class FabricConfigVersionInfo(Model): """Information about a Service Fabric config version. :param config_version: The config version of Service Fabric. @@ -11894,22 +9903,20 @@ class FabricConfigVersionInfo(msrest.serialization.Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__( - self, - *, - config_version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, config_version: str=None, **kwargs) -> None: super(FabricConfigVersionInfo, self).__init__(**kwargs) self.config_version = config_version -class FabricError(msrest.serialization.Model): - """The REST API operations for Service Fabric return standard HTTP status codes. This type defines the additional information returned from the Service Fabric API operations that are not successful. +class FabricError(Model): + """The REST API operations for Service Fabric return standard HTTP status + codes. This type defines the additional information returned from the + Service Fabric API operations that are not successful. All required parameters must be populated in order to send to Azure. - :param error: Required. Error object containing error code and error message. + :param error: Required. Error object containing error code and error + message. 
:type error: ~azure.servicefabric.models.FabricErrorError """ @@ -11921,184 +9928,184 @@ class FabricError(msrest.serialization.Model): 'error': {'key': 'Error', 'type': 'FabricErrorError'}, } - def __init__( - self, - *, - error: "FabricErrorError", - **kwargs - ): + def __init__(self, *, error, **kwargs) -> None: super(FabricError, self).__init__(**kwargs) self.error = error -class FabricErrorError(msrest.serialization.Model): +class FabricErrorException(HttpOperationError): + """Server responsed with exception of type: 'FabricError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args) + + +class FabricErrorError(Model): """Error object containing error code and error message. All required parameters must be populated in order to send to Azure. - :param code: Required. Defines the fabric error codes that be returned as part of the error - object in response to Service Fabric API operations that are not successful. Following are the - error code values that can be returned for a specific HTTP status code. 
- - - * - Possible values of the error code for HTTP status code 400 (Bad Request) - - - * "FABRIC_E_INVALID_PARTITION_KEY" - * "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - * "FABRIC_E_INVALID_ADDRESS" - * "FABRIC_E_APPLICATION_NOT_UPGRADING" - * "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - * "FABRIC_E_FABRIC_NOT_UPGRADING" - * "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - * "FABRIC_E_INVALID_CONFIGURATION" - * "FABRIC_E_INVALID_NAME_URI" - * "FABRIC_E_PATH_TOO_LONG" - * "FABRIC_E_KEY_TOO_LARGE" - * "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - * "FABRIC_E_INVALID_ATOMIC_GROUP" - * "FABRIC_E_VALUE_EMPTY" - * "FABRIC_E_BACKUP_IS_ENABLED" - * "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - * "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - * "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - * "E_INVALIDARG" - - * - Possible values of the error code for HTTP status code 404 (Not Found) - - - * "FABRIC_E_NODE_NOT_FOUND" - * "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - * "FABRIC_E_APPLICATION_NOT_FOUND" - * "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - * "FABRIC_E_SERVICE_DOES_NOT_EXIST" - * "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - * "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - * "FABRIC_E_PARTITION_NOT_FOUND" - * "FABRIC_E_REPLICA_DOES_NOT_EXIST" - * "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - * "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - * "FABRIC_E_DIRECTORY_NOT_FOUND" - * "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - * "FABRIC_E_FILE_NOT_FOUND" - * "FABRIC_E_NAME_DOES_NOT_EXIST" - * "FABRIC_E_PROPERTY_DOES_NOT_EXIST" - * "FABRIC_E_ENUMERATION_COMPLETED" - * "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - * "FABRIC_E_KEY_NOT_FOUND" - * "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - * "FABRIC_E_BACKUP_NOT_ENABLED" - * "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - * "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - * "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - - * - Possible values of the error code for HTTP status code 409 (Conflict) - - - * "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - * 
"FABRIC_E_APPLICATION_ALREADY_EXISTS" - * "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - * "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - * "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - * "FABRIC_E_SERVICE_ALREADY_EXISTS" - * "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - * "FABRIC_E_APPLICATION_TYPE_IN_USE" - * "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - * "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - * "FABRIC_E_FABRIC_VERSION_IN_USE" - * "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - * "FABRIC_E_NAME_ALREADY_EXISTS" - * "FABRIC_E_NAME_NOT_EMPTY" - * "FABRIC_E_PROPERTY_CHECK_FAILED" - * "FABRIC_E_SERVICE_METADATA_MISMATCH" - * "FABRIC_E_SERVICE_TYPE_MISMATCH" - * "FABRIC_E_HEALTH_STALE_REPORT" - * "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - * "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - * "FABRIC_E_INSTANCE_ID_MISMATCH" - * "FABRIC_E_BACKUP_IN_PROGRESS" - * "FABRIC_E_RESTORE_IN_PROGRESS" - * "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - - * - Possible values of the error code for HTTP status code 413 (Request Entity Too Large) - - - * "FABRIC_E_VALUE_TOO_LARGE" - - * - Possible values of the error code for HTTP status code 500 (Internal Server Error) - - - * "FABRIC_E_NODE_IS_UP" - * "E_FAIL" - * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - * "FABRIC_E_VOLUME_ALREADY_EXISTS" - * "FABRIC_E_VOLUME_NOT_FOUND" - * "SerializationError" - - * - Possible values of the error code for HTTP status code 503 (Service Unavailable) - - - * "FABRIC_E_NO_WRITE_QUORUM" - * "FABRIC_E_NOT_PRIMARY" - * "FABRIC_E_NOT_READY" - * "FABRIC_E_RECONFIGURATION_PENDING" - * "FABRIC_E_SERVICE_OFFLINE" - * "E_ABORT" - * "FABRIC_E_VALUE_TOO_LARGE" - - * - Possible values of the error code for HTTP status code 504 (Gateway Timeout) - - - * "FABRIC_E_COMMUNICATION_ERROR" - * "FABRIC_E_OPERATION_NOT_COMPLETE" - * "FABRIC_E_TIMEOUT". 
Possible values include: "FABRIC_E_INVALID_PARTITION_KEY", - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR", "FABRIC_E_INVALID_ADDRESS", - "FABRIC_E_APPLICATION_NOT_UPGRADING", "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR", - "FABRIC_E_FABRIC_NOT_UPGRADING", "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR", - "FABRIC_E_INVALID_CONFIGURATION", "FABRIC_E_INVALID_NAME_URI", "FABRIC_E_PATH_TOO_LONG", - "FABRIC_E_KEY_TOO_LARGE", "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED", - "FABRIC_E_INVALID_ATOMIC_GROUP", "FABRIC_E_VALUE_EMPTY", "FABRIC_E_NODE_NOT_FOUND", - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND", "FABRIC_E_APPLICATION_NOT_FOUND", - "FABRIC_E_SERVICE_TYPE_NOT_FOUND", "FABRIC_E_SERVICE_DOES_NOT_EXIST", - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND", "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND", - "FABRIC_E_PARTITION_NOT_FOUND", "FABRIC_E_REPLICA_DOES_NOT_EXIST", - "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST", "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND", - "FABRIC_E_DIRECTORY_NOT_FOUND", "FABRIC_E_FABRIC_VERSION_NOT_FOUND", "FABRIC_E_FILE_NOT_FOUND", - "FABRIC_E_NAME_DOES_NOT_EXIST", "FABRIC_E_PROPERTY_DOES_NOT_EXIST", - "FABRIC_E_ENUMERATION_COMPLETED", "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND", - "FABRIC_E_KEY_NOT_FOUND", "FABRIC_E_HEALTH_ENTITY_NOT_FOUND", - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS", "FABRIC_E_APPLICATION_ALREADY_EXISTS", - "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION", - "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS", "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS", - "FABRIC_E_SERVICE_ALREADY_EXISTS", "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS", - "FABRIC_E_APPLICATION_TYPE_IN_USE", "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION", - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS", "FABRIC_E_FABRIC_VERSION_IN_USE", - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS", "FABRIC_E_NAME_ALREADY_EXISTS", - "FABRIC_E_NAME_NOT_EMPTY", "FABRIC_E_PROPERTY_CHECK_FAILED", - "FABRIC_E_SERVICE_METADATA_MISMATCH", "FABRIC_E_SERVICE_TYPE_MISMATCH", - "FABRIC_E_HEALTH_STALE_REPORT", 
"FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED", - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET", "FABRIC_E_INSTANCE_ID_MISMATCH", - "FABRIC_E_VALUE_TOO_LARGE", "FABRIC_E_NO_WRITE_QUORUM", "FABRIC_E_NOT_PRIMARY", - "FABRIC_E_NOT_READY", "FABRIC_E_RECONFIGURATION_PENDING", "FABRIC_E_SERVICE_OFFLINE", - "E_ABORT", "FABRIC_E_COMMUNICATION_ERROR", "FABRIC_E_OPERATION_NOT_COMPLETE", - "FABRIC_E_TIMEOUT", "FABRIC_E_NODE_IS_UP", "E_FAIL", "FABRIC_E_BACKUP_IS_ENABLED", - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH", "FABRIC_E_INVALID_FOR_STATELESS_SERVICES", - "FABRIC_E_BACKUP_NOT_ENABLED", "FABRIC_E_BACKUP_POLICY_NOT_EXISTING", - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING", "FABRIC_E_BACKUP_IN_PROGRESS", - "FABRIC_E_RESTORE_IN_PROGRESS", "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING", - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY", "E_INVALIDARG", - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS", - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND", "FABRIC_E_VOLUME_ALREADY_EXISTS", - "FABRIC_E_VOLUME_NOT_FOUND", "SerializationError", - "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR". + :param code: Required. Defines the fabric error codes that be returned as + part of the error object in response to Service Fabric API operations that + are not successful. Following are the error code values that can be + returned for a specific HTTP status code. 
+ - Possible values of the error code for HTTP status code 400 (Bad Request) + - "FABRIC_E_INVALID_PARTITION_KEY" + - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + - "FABRIC_E_INVALID_ADDRESS" + - "FABRIC_E_APPLICATION_NOT_UPGRADING" + - "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + - "FABRIC_E_FABRIC_NOT_UPGRADING" + - "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + - "FABRIC_E_INVALID_CONFIGURATION" + - "FABRIC_E_INVALID_NAME_URI" + - "FABRIC_E_PATH_TOO_LONG" + - "FABRIC_E_KEY_TOO_LARGE" + - "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + - "FABRIC_E_INVALID_ATOMIC_GROUP" + - "FABRIC_E_VALUE_EMPTY" + - "FABRIC_E_BACKUP_IS_ENABLED" + - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + - "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + - "E_INVALIDARG" + - Possible values of the error code for HTTP status code 404 (Not Found) + - "FABRIC_E_NODE_NOT_FOUND" + - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + - "FABRIC_E_APPLICATION_NOT_FOUND" + - "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + - "FABRIC_E_SERVICE_DOES_NOT_EXIST" + - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + - "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + - "FABRIC_E_PARTITION_NOT_FOUND" + - "FABRIC_E_REPLICA_DOES_NOT_EXIST" + - "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + - "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + - "FABRIC_E_DIRECTORY_NOT_FOUND" + - "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + - "FABRIC_E_FILE_NOT_FOUND" + - "FABRIC_E_NAME_DOES_NOT_EXIST" + - "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + - "FABRIC_E_ENUMERATION_COMPLETED" + - "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + - "FABRIC_E_KEY_NOT_FOUND" + - "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + - "FABRIC_E_BACKUP_NOT_ENABLED" + - "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + - "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + - Possible values of the error code for HTTP status code 409 (Conflict) + - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_ALREADY_EXISTS" + 
- "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + - "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + - "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + - "FABRIC_E_SERVICE_ALREADY_EXISTS" + - "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_TYPE_IN_USE" + - "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + - "FABRIC_E_FABRIC_VERSION_IN_USE" + - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + - "FABRIC_E_NAME_ALREADY_EXISTS" + - "FABRIC_E_NAME_NOT_EMPTY" + - "FABRIC_E_PROPERTY_CHECK_FAILED" + - "FABRIC_E_SERVICE_METADATA_MISMATCH" + - "FABRIC_E_SERVICE_TYPE_MISMATCH" + - "FABRIC_E_HEALTH_STALE_REPORT" + - "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + - "FABRIC_E_INSTANCE_ID_MISMATCH" + - "FABRIC_E_BACKUP_IN_PROGRESS" + - "FABRIC_E_RESTORE_IN_PROGRESS" + - "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + - Possible values of the error code for HTTP status code 413 (Request + Entity Too Large) + - "FABRIC_E_VALUE_TOO_LARGE" + - Possible values of the error code for HTTP status code 500 (Internal + Server Error) + - "FABRIC_E_NODE_IS_UP" + - "E_FAIL" + - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + - "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + - "FABRIC_E_VOLUME_ALREADY_EXISTS" + - "FABRIC_E_VOLUME_NOT_FOUND" + - "SerializationError" + - Possible values of the error code for HTTP status code 503 (Service + Unavailable) + - "FABRIC_E_NO_WRITE_QUORUM" + - "FABRIC_E_NOT_PRIMARY" + - "FABRIC_E_NOT_READY" + - "FABRIC_E_RECONFIGURATION_PENDING" + - "FABRIC_E_SERVICE_OFFLINE" + - "E_ABORT" + - "FABRIC_E_VALUE_TOO_LARGE" + - Possible values of the error code for HTTP status code 504 (Gateway + Timeout) + - "FABRIC_E_COMMUNICATION_ERROR" + - "FABRIC_E_OPERATION_NOT_COMPLETE" + - "FABRIC_E_TIMEOUT". 
Possible values include: + 'FABRIC_E_INVALID_PARTITION_KEY', + 'FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR', 'FABRIC_E_INVALID_ADDRESS', + 'FABRIC_E_APPLICATION_NOT_UPGRADING', + 'FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR', + 'FABRIC_E_FABRIC_NOT_UPGRADING', + 'FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR', + 'FABRIC_E_INVALID_CONFIGURATION', 'FABRIC_E_INVALID_NAME_URI', + 'FABRIC_E_PATH_TOO_LONG', 'FABRIC_E_KEY_TOO_LARGE', + 'FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED', + 'FABRIC_E_INVALID_ATOMIC_GROUP', 'FABRIC_E_VALUE_EMPTY', + 'FABRIC_E_NODE_NOT_FOUND', 'FABRIC_E_APPLICATION_TYPE_NOT_FOUND', + 'FABRIC_E_APPLICATION_NOT_FOUND', 'FABRIC_E_SERVICE_TYPE_NOT_FOUND', + 'FABRIC_E_SERVICE_DOES_NOT_EXIST', + 'FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND', + 'FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND', + 'FABRIC_E_PARTITION_NOT_FOUND', 'FABRIC_E_REPLICA_DOES_NOT_EXIST', + 'FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST', + 'FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND', + 'FABRIC_E_DIRECTORY_NOT_FOUND', 'FABRIC_E_FABRIC_VERSION_NOT_FOUND', + 'FABRIC_E_FILE_NOT_FOUND', 'FABRIC_E_NAME_DOES_NOT_EXIST', + 'FABRIC_E_PROPERTY_DOES_NOT_EXIST', 'FABRIC_E_ENUMERATION_COMPLETED', + 'FABRIC_E_SERVICE_MANIFEST_NOT_FOUND', 'FABRIC_E_KEY_NOT_FOUND', + 'FABRIC_E_HEALTH_ENTITY_NOT_FOUND', + 'FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION', + 'FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS', + 'FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS', + 'FABRIC_E_SERVICE_ALREADY_EXISTS', + 'FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_TYPE_IN_USE', + 'FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION', + 'FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS', + 'FABRIC_E_FABRIC_VERSION_IN_USE', 'FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS', + 'FABRIC_E_NAME_ALREADY_EXISTS', 'FABRIC_E_NAME_NOT_EMPTY', + 'FABRIC_E_PROPERTY_CHECK_FAILED', 'FABRIC_E_SERVICE_METADATA_MISMATCH', + 'FABRIC_E_SERVICE_TYPE_MISMATCH', 'FABRIC_E_HEALTH_STALE_REPORT', + 
'FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED', + 'FABRIC_E_NODE_HAS_NOT_STOPPED_YET', 'FABRIC_E_INSTANCE_ID_MISMATCH', + 'FABRIC_E_VALUE_TOO_LARGE', 'FABRIC_E_NO_WRITE_QUORUM', + 'FABRIC_E_NOT_PRIMARY', 'FABRIC_E_NOT_READY', + 'FABRIC_E_RECONFIGURATION_PENDING', 'FABRIC_E_SERVICE_OFFLINE', 'E_ABORT', + 'FABRIC_E_COMMUNICATION_ERROR', 'FABRIC_E_OPERATION_NOT_COMPLETE', + 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP', 'E_FAIL', + 'FABRIC_E_BACKUP_IS_ENABLED', + 'FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH', + 'FABRIC_E_INVALID_FOR_STATELESS_SERVICES', 'FABRIC_E_BACKUP_NOT_ENABLED', + 'FABRIC_E_BACKUP_POLICY_NOT_EXISTING', + 'FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING', + 'FABRIC_E_BACKUP_IN_PROGRESS', 'FABRIC_E_RESTORE_IN_PROGRESS', + 'FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING', + 'FABRIC_E_INVALID_SERVICE_SCALING_POLICY', 'E_INVALIDARG', + 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS', + 'FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND', + 'FABRIC_E_VOLUME_ALREADY_EXISTS', 'FABRIC_E_VOLUME_NOT_FOUND', + 'SerializationError', 'FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR' :type code: str or ~azure.servicefabric.models.FabricErrorCodes :param message: Error message. :type message: str @@ -12113,30 +10120,22 @@ class FabricErrorError(msrest.serialization.Model): 'message': {'key': 'Message', 'type': 'str'}, } - def __init__( - self, - *, - code: Union[str, "FabricErrorCodes"], - message: Optional[str] = None, - **kwargs - ): + def __init__(self, *, code, message: str=None, **kwargs) -> None: super(FabricErrorError, self).__init__(**kwargs) self.code = code self.message = message -class PropertyBatchInfo(msrest.serialization.Model): +class PropertyBatchInfo(Model): """Information about the results of a property batch. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FailedPropertyBatchInfo, SuccessfulPropertyBatchInfo. 
+ sub-classes are: SuccessfulPropertyBatchInfo, FailedPropertyBatchInfo All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch info, determined by the results of a property - batch. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Successful", "Failed". - :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -12148,30 +10147,28 @@ class PropertyBatchInfo(msrest.serialization.Model): } _subtype_map = { - 'kind': {'Failed': 'FailedPropertyBatchInfo', 'Successful': 'SuccessfulPropertyBatchInfo'} + 'kind': {'Successful': 'SuccessfulPropertyBatchInfo', 'Failed': 'FailedPropertyBatchInfo'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(PropertyBatchInfo, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class FailedPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch failing. Contains information about the specific batch failure. + """Derived from PropertyBatchInfo. Represents the property batch failing. + Contains information about the specific batch failure. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch info, determined by the results of a property - batch. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Successful", "Failed". - :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind - :param error_message: The error message of the failed operation. Describes the exception thrown - due to the first unsuccessful operation in the property batch. + :param kind: Required. Constant filled by server. + :type kind: str + :param error_message: The error message of the failed operation. 
Describes + the exception thrown due to the first unsuccessful operation in the + property batch. :type error_message: str - :param operation_index: The index of the unsuccessful operation in the property batch. + :param operation_index: The index of the unsuccessful operation in the + property batch. :type operation_index: int """ @@ -12185,26 +10182,23 @@ class FailedPropertyBatchInfo(PropertyBatchInfo): 'operation_index': {'key': 'OperationIndex', 'type': 'int'}, } - def __init__( - self, - *, - error_message: Optional[str] = None, - operation_index: Optional[int] = None, - **kwargs - ): + def __init__(self, *, error_message: str=None, operation_index: int=None, **kwargs) -> None: super(FailedPropertyBatchInfo, self).__init__(**kwargs) - self.kind = 'Failed' # type: str self.error_message = error_message self.operation_index = operation_index + self.kind = 'Failed' -class FailedUpgradeDomainProgressObject(msrest.serialization.Model): - """The detailed upgrade progress for nodes in the current upgrade domain at the point of failure. +class FailedUpgradeDomainProgressObject(Model): + """The detailed upgrade progress for nodes in the current upgrade domain at + the point of failure. - :param domain_name: The name of the upgrade domain. + :param domain_name: The name of the upgrade domain :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
- :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -12212,25 +10206,22 @@ class FailedUpgradeDomainProgressObject(msrest.serialization.Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - node_upgrade_progress_list: Optional[List["NodeUpgradeProgressInfo"]] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: super(FailedUpgradeDomainProgressObject, self).__init__(**kwargs) self.domain_name = domain_name self.node_upgrade_progress_list = node_upgrade_progress_list -class FailureUpgradeDomainProgressInfo(msrest.serialization.Model): - """Information about the upgrade domain progress at the time of upgrade failure. +class FailureUpgradeDomainProgressInfo(Model): + """Information about the upgrade domain progress at the time of upgrade + failure. - :param domain_name: The name of the upgrade domain. + :param domain_name: The name of the upgrade domain :type domain_name: str - :param node_upgrade_progress_list: List of upgrading nodes and their statuses. 
- :type node_upgrade_progress_list: list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] """ _attribute_map = { @@ -12238,28 +10229,24 @@ class FailureUpgradeDomainProgressInfo(msrest.serialization.Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - node_upgrade_progress_list: Optional[List["NodeUpgradeProgressInfo"]] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: super(FailureUpgradeDomainProgressInfo, self).__init__(**kwargs) self.domain_name = domain_name self.node_upgrade_progress_list = node_upgrade_progress_list -class FileInfo(msrest.serialization.Model): +class FileInfo(Model): """Information about a image store file. :param file_size: The size of file in bytes. :type file_size: str :param file_version: Information about the version of image store file. :type file_version: ~azure.servicefabric.models.FileVersion - :param modified_date: The date and time when the image store file was last modified. - :type modified_date: ~datetime.datetime - :param store_relative_path: The file path relative to the image store root path. + :param modified_date: The date and time when the image store file was last + modified. + :type modified_date: datetime + :param store_relative_path: The file path relative to the image store root + path. 
:type store_relative_path: str """ @@ -12270,15 +10257,7 @@ class FileInfo(msrest.serialization.Model): 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, } - def __init__( - self, - *, - file_size: Optional[str] = None, - file_version: Optional["FileVersion"] = None, - modified_date: Optional[datetime.datetime] = None, - store_relative_path: Optional[str] = None, - **kwargs - ): + def __init__(self, *, file_size: str=None, file_version=None, modified_date=None, store_relative_path: str=None, **kwargs) -> None: super(FileInfo, self).__init__(**kwargs) self.file_size = file_size self.file_version = file_version @@ -12287,17 +10266,17 @@ def __init__( class FileShareBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for file share storage used for storing or enumerating backups. + """Describes the parameters for file share storage used for storing or + enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param path: Required. UNC path of the file share where to store or enumerate backups from. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param path: Required. UNC path of the file share where to store or + enumerate backups from. :type path: str :param primary_user_name: Primary user name to access the file share. :type primary_user_name: str @@ -12305,7 +10284,7 @@ class FileShareBackupStorageDescription(BackupStorageDescription): :type primary_password: str :param secondary_user_name: Secondary user name to access the file share. 
:type secondary_user_name: str - :param secondary_password: Secondary password to access the share location. + :param secondary_password: Secondary password to access the share location :type secondary_password: str """ @@ -12315,8 +10294,8 @@ class FileShareBackupStorageDescription(BackupStorageDescription): } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'path': {'key': 'Path', 'type': 'str'}, 'primary_user_name': {'key': 'PrimaryUserName', 'type': 'str'}, 'primary_password': {'key': 'PrimaryPassword', 'type': 'str'}, @@ -12324,37 +10303,27 @@ class FileShareBackupStorageDescription(BackupStorageDescription): 'secondary_password': {'key': 'SecondaryPassword', 'type': 'str'}, } - def __init__( - self, - *, - path: str, - friendly_name: Optional[str] = None, - primary_user_name: Optional[str] = None, - primary_password: Optional[str] = None, - secondary_user_name: Optional[str] = None, - secondary_password: Optional[str] = None, - **kwargs - ): + def __init__(self, *, path: str, friendly_name: str=None, primary_user_name: str=None, primary_password: str=None, secondary_user_name: str=None, secondary_password: str=None, **kwargs) -> None: super(FileShareBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) - self.storage_kind = 'FileShare' # type: str self.path = path self.primary_user_name = primary_user_name self.primary_password = primary_password self.secondary_user_name = secondary_user_name self.secondary_password = secondary_password + self.storage_kind = 'FileShare' -class FileVersion(msrest.serialization.Model): +class FileVersion(Model): """Information about the version of image store file. - :param version_number: The current image store version number for the file is used in image - store for checking whether it need to be updated. 
+ :param version_number: The current image store version number for the file + is used in image store for checking whether it need to be updated. :type version_number: str - :param epoch_data_loss_number: The epoch data loss number of image store replica when this file - entry was updated or created. + :param epoch_data_loss_number: The epoch data loss number of image store + replica when this file entry was updated or created. :type epoch_data_loss_number: str - :param epoch_configuration_number: The epoch configuration version number of the image store - replica when this file entry was created or updated. + :param epoch_configuration_number: The epoch configuration version number + of the image store replica when this file entry was created or updated. :type epoch_configuration_number: str """ @@ -12364,25 +10333,19 @@ class FileVersion(msrest.serialization.Model): 'epoch_configuration_number': {'key': 'EpochConfigurationNumber', 'type': 'str'}, } - def __init__( - self, - *, - version_number: Optional[str] = None, - epoch_data_loss_number: Optional[str] = None, - epoch_configuration_number: Optional[str] = None, - **kwargs - ): + def __init__(self, *, version_number: str=None, epoch_data_loss_number: str=None, epoch_configuration_number: str=None, **kwargs) -> None: super(FileVersion, self).__init__(**kwargs) self.version_number = version_number self.epoch_data_loss_number = epoch_data_loss_number self.epoch_configuration_number = epoch_configuration_number -class FolderInfo(msrest.serialization.Model): - """Information about a image store folder. It includes how many files this folder contains and its image store relative path. +class FolderInfo(Model): + """Information about a image store folder. It includes how many files this + folder contains and its image store relative path. - :param store_relative_path: The remote location within image store. This path is relative to - the image store root. 
+ :param store_relative_path: The remote location within image store. This + path is relative to the image store root. :type store_relative_path: str :param file_count: The number of files from within the image store folder. :type file_count: str @@ -12393,23 +10356,17 @@ class FolderInfo(msrest.serialization.Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__( - self, - *, - store_relative_path: Optional[str] = None, - file_count: Optional[str] = None, - **kwargs - ): + def __init__(self, *, store_relative_path: str=None, file_count: str=None, **kwargs) -> None: super(FolderInfo, self).__init__(**kwargs) self.store_relative_path = store_relative_path self.file_count = file_count -class FolderSizeInfo(msrest.serialization.Model): +class FolderSizeInfo(Model): """Information of a image store folder size. - :param store_relative_path: The remote location within image store. This path is relative to - the image store root. + :param store_relative_path: The remote location within image store. This + path is relative to the image store root. :type store_relative_path: str :param folder_size: The size of folder in bytes. :type folder_size: str @@ -12420,13 +10377,7 @@ class FolderSizeInfo(msrest.serialization.Model): 'folder_size': {'key': 'FolderSize', 'type': 'str'}, } - def __init__( - self, - *, - store_relative_path: Optional[str] = None, - folder_size: Optional[str] = None, - **kwargs - ): + def __init__(self, *, store_relative_path: str=None, folder_size: str=None, **kwargs) -> None: super(FolderSizeInfo, self).__init__(**kwargs) self.store_relative_path = store_relative_path self.folder_size = folder_size @@ -12437,14 +10388,12 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. The kind of backup schedule, time based or frequency - based.Constant filled by server. 
Possible values include: "Invalid", "TimeBased", - "FrequencyBased". - :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind - :param interval: Required. Defines the interval with which backups are periodically taken. It - should be specified in ISO8601 format. Timespan in seconds is not supported and will be ignored - while creating the policy. - :type interval: ~datetime.timedelta + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str + :param interval: Required. Defines the interval with which backups are + periodically taken. It should be specified in ISO8601 format. Timespan in + seconds is not supported and will be ignored while creating the policy. + :type interval: timedelta """ _validation = { @@ -12457,23 +10406,19 @@ class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): 'interval': {'key': 'Interval', 'type': 'duration'}, } - def __init__( - self, - *, - interval: datetime.timedelta, - **kwargs - ): + def __init__(self, *, interval, **kwargs) -> None: super(FrequencyBasedBackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = 'FrequencyBased' # type: str self.interval = interval + self.schedule_kind = 'FrequencyBased' -class GatewayDestination(msrest.serialization.Model): +class GatewayDestination(Model): """Describes destination endpoint for routing traffic. All required parameters must be populated in order to send to Azure. - :param application_name: Required. Name of the service fabric Mesh application. + :param application_name: Required. Name of the service fabric Mesh + application. :type application_name: str :param service_name: Required. service that contains the endpoint. 
:type service_name: str @@ -12493,24 +10438,18 @@ class GatewayDestination(msrest.serialization.Model): 'endpoint_name': {'key': 'endpointName', 'type': 'str'}, } - def __init__( - self, - *, - application_name: str, - service_name: str, - endpoint_name: str, - **kwargs - ): + def __init__(self, *, application_name: str, service_name: str, endpoint_name: str, **kwargs) -> None: super(GatewayDestination, self).__init__(**kwargs) self.application_name = application_name self.service_name = service_name self.endpoint_name = endpoint_name -class GatewayResourceDescription(msrest.serialization.Model): +class GatewayResourceDescription(Model): """This type describes a gateway resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. @@ -12518,21 +10457,24 @@ class GatewayResourceDescription(msrest.serialization.Model): :type name: str :param description: User readable description of the gateway. :type description: str - :param source_network: Required. Network the gateway should listen on for requests. + :param source_network: Required. Network the gateway should listen on for + requests. :type source_network: ~azure.servicefabric.models.NetworkRef - :param destination_network: Required. Network that the Application is using. + :param destination_network: Required. Network that the Application is + using. :type destination_network: ~azure.servicefabric.models.NetworkRef :param tcp: Configuration for tcp connectivity for this gateway. :type tcp: list[~azure.servicefabric.models.TcpConfig] :param http: Configuration for http connectivity for this gateway. :type http: list[~azure.servicefabric.models.HttpConfig] - :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". 
+ :ivar status: Status of the resource. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the gateway. + :ivar status_details: Gives additional information about the current + status of the gateway. :vartype status_details: str - :ivar ip_address: IP address of the gateway. This is populated in the response and is ignored - for incoming requests. + :ivar ip_address: IP address of the gateway. This is populated in the + response and is ignored for incoming requests. :vartype ip_address: str """ @@ -12557,17 +10499,7 @@ class GatewayResourceDescription(msrest.serialization.Model): 'ip_address': {'key': 'properties.ipAddress', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - source_network: "NetworkRef", - destination_network: "NetworkRef", - description: Optional[str] = None, - tcp: Optional[List["TcpConfig"]] = None, - http: Optional[List["HttpConfig"]] = None, - **kwargs - ): + def __init__(self, *, name: str, source_network, destination_network, description: str=None, tcp=None, http=None, **kwargs) -> None: super(GatewayResourceDescription, self).__init__(**kwargs) self.name = name self.description = description @@ -12580,27 +10512,33 @@ def __init__( self.ip_address = None -class GetBackupByStorageQueryDescription(msrest.serialization.Model): - """Describes additional filters to be applied, while listing backups, and backup storage details from where to fetch the backups. +class GetBackupByStorageQueryDescription(Model): + """Describes additional filters to be applied, while listing backups, and + backup storage details from where to fetch the backups. All required parameters must be populated in order to send to Azure. - :param start_date_time_filter: Specifies the start date time in ISO8601 from which to enumerate - backups. 
If not specified, backups are enumerated from the beginning. - :type start_date_time_filter: ~datetime.datetime - :param end_date_time_filter: Specifies the end date time in ISO8601 till which to enumerate - backups. If not specified, backups are enumerated till the end. - :type end_date_time_filter: ~datetime.datetime - :param latest: If specified as true, gets the most recent backup (within the specified time - range) for every partition under the specified backup entity. + :param start_date_time_filter: Specifies the start date time in ISO8601 + from which to enumerate backups. If not specified, backups are enumerated + from the beginning. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specifies the end date time in ISO8601 till + which to enumerate backups. If not specified, backups are enumerated till + the end. + :type end_date_time_filter: datetime + :param latest: If specified as true, gets the most recent backup (within + the specified time range) for every partition under the specified backup + entity. Default value: False . :type latest: bool - :param storage: Required. Describes the parameters for the backup storage from where to - enumerate backups. This is optional and by default backups are enumerated from the backup - storage where this backup entity is currently being backed up (as specified in backup policy). - This parameter is useful to be able to enumerate backups from another cluster where you may - intend to restore. + :param storage: Required. Describes the parameters for the backup storage + from where to enumerate backups. This is optional and by default backups + are enumerated from the backup storage where this backup entity is + currently being backed up (as specified in backup policy). This parameter + is useful to be able to enumerate backups from another cluster where you + may intend to restore. :type storage: ~azure.servicefabric.models.BackupStorageDescription - :param backup_entity: Required. 
Indicates the entity for which to enumerate backups. + :param backup_entity: Required. Indicates the entity for which to + enumerate backups. :type backup_entity: ~azure.servicefabric.models.BackupEntity """ @@ -12617,16 +10555,7 @@ class GetBackupByStorageQueryDescription(msrest.serialization.Model): 'backup_entity': {'key': 'BackupEntity', 'type': 'BackupEntity'}, } - def __init__( - self, - *, - storage: "BackupStorageDescription", - backup_entity: "BackupEntity", - start_date_time_filter: Optional[datetime.datetime] = None, - end_date_time_filter: Optional[datetime.datetime] = None, - latest: Optional[bool] = False, - **kwargs - ): + def __init__(self, *, storage, backup_entity, start_date_time_filter=None, end_date_time_filter=None, latest: bool=False, **kwargs) -> None: super(GetBackupByStorageQueryDescription, self).__init__(**kwargs) self.start_date_time_filter = start_date_time_filter self.end_date_time_filter = end_date_time_filter @@ -12636,44 +10565,39 @@ def __init__( class GetPropertyBatchOperation(PropertyBatchOperation): - """Represents a PropertyBatchOperation that gets the specified property if it exists. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + """Represents a PropertyBatchOperation that gets the specified property if it + exists. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. 
The name of the Service Fabric property. :type property_name: str - :param include_value: Whether or not to return the property value with the metadata. - True if values should be returned with the metadata; False to return only property metadata. + :param kind: Required. Constant filled by server. + :type kind: str + :param include_value: Whether or not to return the property value with the + metadata. + True if values should be returned with the metadata; False to return only + property metadata. Default value: False . :type include_value: bool """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'include_value': {'key': 'IncludeValue', 'type': 'bool'}, } - def __init__( - self, - *, - property_name: str, - include_value: Optional[bool] = False, - **kwargs - ): + def __init__(self, *, property_name: str, include_value: bool=False, **kwargs) -> None: super(GetPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'Get' # type: str self.include_value = include_value + self.kind = 'Get' class GuidPropertyValue(PropertyValue): @@ -12681,10 +10605,8 @@ class GuidPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. + :type kind: str :param data: Required. The data of the property value. 
:type data: str """ @@ -12699,22 +10621,18 @@ class GuidPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__( - self, - *, - data: str, - **kwargs - ): + def __init__(self, *, data: str, **kwargs) -> None: super(GuidPropertyValue, self).__init__(**kwargs) - self.kind = 'Guid' # type: str self.data = data + self.kind = 'Guid' -class HealthEvaluationWrapper(msrest.serialization.Model): +class HealthEvaluationWrapper(Model): """Wrapper object for health evaluation. - :param health_evaluation: Represents a health evaluation which describes the data and the - algorithm used by health manager to evaluate the health of an entity. + :param health_evaluation: Represents a health evaluation which describes + the data and the algorithm used by health manager to evaluate the health + of an entity. :type health_evaluation: ~azure.servicefabric.models.HealthEvaluation """ @@ -12722,81 +10640,86 @@ class HealthEvaluationWrapper(msrest.serialization.Model): 'health_evaluation': {'key': 'HealthEvaluation', 'type': 'HealthEvaluation'}, } - def __init__( - self, - *, - health_evaluation: Optional["HealthEvaluation"] = None, - **kwargs - ): + def __init__(self, *, health_evaluation=None, **kwargs) -> None: super(HealthEvaluationWrapper, self).__init__(**kwargs) self.health_evaluation = health_evaluation -class HealthInformation(msrest.serialization.Model): - """Represents common health report information. It is included in all health reports sent to health store and in all health events returned by health queries. +class HealthInformation(Model): + """Represents common health report information. It is included in all health + reports sent to health store and in all health events returned by health + queries. All required parameters must be populated in order to send to Azure. - :param source_id: Required. The source name that identifies the client/watchdog/system - component that generated the health information. + :param source_id: Required. 
The source name that identifies the + client/watchdog/system component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An entity can have health - reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter flexibility to - categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available - disk on a node, + :param property: Required. The property of the health information. An + entity can have health reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter + flexibility to categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the + state of the available disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a property - "Connectivity" on the same node. - In the health store, these reports are treated as separate health events for the specified - node. - - Together with the SourceId, the property uniquely identifies the health information. + The same reporter can monitor the node connectivity, so it can report a + property "Connectivity" on the same node. + In the health store, these reports are treated as separate health events + for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This - field uses ISO8601 format for specifying the duration. - When clients report periodically, they should send reports with higher frequency than time to - live. - If clients report on transition, they can set the time to live to infinite. - When time to live expires, the health event that contains the health information - is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if - RemoveWhenExpired false. - + :param time_to_live_in_milli_seconds: The duration for which this health + report is valid. This field uses ISO8601 format for specifying the + duration. + When clients report periodically, they should send reports with higher + frequency than time to live. + If clients report on transition, they can set the time to live to + infinite. + When time to live expires, the health event that contains the health + information + is either removed from health store, if RemoveWhenExpired is true, or + evaluated at error, if RemoveWhenExpired false. If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: ~datetime.timedelta - :param description: The description of the health information. It represents free text used to - add human readable information about the report. + :type time_to_live_in_milli_seconds: timedelta + :param description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. - When truncated, the last characters of the description contain a marker "[Truncated]", and - total string size is 4096 characters. 
+ When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters from the original - string. + Note that when truncated, the description has less than 4096 characters + from the original string. :type description: str - :param sequence_number: The sequence number for this health report as a numeric string. - The report sequence number is used by the health store to detect stale reports. - If not specified, a sequence number is auto-generated by the health client when a report is - added. + :param sequence_number: The sequence number for this health report as a + numeric string. + The report sequence number is used by the health store to detect stale + reports. + If not specified, a sequence number is auto-generated by the health client + when a report is added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is removed from health - store when it expires. - If set to true, the report is removed from the health store after it expires. - If set to false, the report is treated as an error when expired. The value of this property is - false by default. - When clients report periodically, they should set RemoveWhenExpired false (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated - at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. + If set to true, the report is removed from the health store after it + expires. + If set to false, the report is treated as an error when expired. The value + of this property is false by default. + When clients report periodically, they should set RemoveWhenExpired false + (default). 
+ This way, if the reporter has issues (e.g. deadlock) and can't report, the + entity is evaluated at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health report and can be used - to find more detailed information about a specific health event at - aka.ms/sfhealthid. + :param health_report_id: A health report ID which identifies the health + report and can be used to find more detailed information about a specific + health event at + aka.ms/sfhealthid :type health_report_id: str """ @@ -12817,19 +10740,7 @@ class HealthInformation(msrest.serialization.Model): 'health_report_id': {'key': 'HealthReportId', 'type': 'str'}, } - def __init__( - self, - *, - source_id: str, - property: str, - health_state: Union[str, "HealthState"], - time_to_live_in_milli_seconds: Optional[datetime.timedelta] = None, - description: Optional[str] = None, - sequence_number: Optional[str] = None, - remove_when_expired: Optional[bool] = None, - health_report_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, source_id: str, property: str, health_state, time_to_live_in_milli_seconds=None, description: str=None, sequence_number: str=None, remove_when_expired: bool=None, health_report_id: str=None, **kwargs) -> None: super(HealthInformation, self).__init__(**kwargs) self.source_id = source_id self.property = property @@ -12842,108 +10753,121 @@ def __init__( class HealthEvent(HealthInformation): - """Represents health information reported on a health entity, such as cluster, application or node, with additional metadata added by the Health Manager. + """Represents health information reported on a health entity, such as cluster, + application or node, with additional metadata added by the Health Manager. All required parameters must be populated in order to send to Azure. - :param source_id: Required. 
The source name that identifies the client/watchdog/system - component that generated the health information. + :param source_id: Required. The source name that identifies the + client/watchdog/system component that generated the health information. :type source_id: str - :param property: Required. The property of the health information. An entity can have health - reports for different properties. - The property is a string and not a fixed enumeration to allow the reporter flexibility to - categorize the state condition that triggers the report. - For example, a reporter with SourceId "LocalWatchdog" can monitor the state of the available - disk on a node, + :param property: Required. The property of the health information. An + entity can have health reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter + flexibility to categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the + state of the available disk on a node, so it can report "AvailableDisk" property on that node. - The same reporter can monitor the node connectivity, so it can report a property - "Connectivity" on the same node. - In the health store, these reports are treated as separate health events for the specified - node. - - Together with the SourceId, the property uniquely identifies the health information. + The same reporter can monitor the node connectivity, so it can report a + property "Connectivity" on the same node. + In the health store, these reports are treated as separate health events + for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. :type property: str - :param health_state: Required. The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
+ :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param time_to_live_in_milli_seconds: The duration for which this health report is valid. This - field uses ISO8601 format for specifying the duration. - When clients report periodically, they should send reports with higher frequency than time to - live. - If clients report on transition, they can set the time to live to infinite. - When time to live expires, the health event that contains the health information - is either removed from health store, if RemoveWhenExpired is true, or evaluated at error, if - RemoveWhenExpired false. - + :param time_to_live_in_milli_seconds: The duration for which this health + report is valid. This field uses ISO8601 format for specifying the + duration. + When clients report periodically, they should send reports with higher + frequency than time to live. + If clients report on transition, they can set the time to live to + infinite. + When time to live expires, the health event that contains the health + information + is either removed from health store, if RemoveWhenExpired is true, or + evaluated at error, if RemoveWhenExpired false. If not specified, time to live defaults to infinite value. - :type time_to_live_in_milli_seconds: ~datetime.timedelta - :param description: The description of the health information. It represents free text used to - add human readable information about the report. + :type time_to_live_in_milli_seconds: timedelta + :param description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. If the provided string is longer, it will be automatically truncated. 
- When truncated, the last characters of the description contain a marker "[Truncated]", and - total string size is 4096 characters. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of the marker indicates to users that truncation occurred. - Note that when truncated, the description has less than 4096 characters from the original - string. + Note that when truncated, the description has less than 4096 characters + from the original string. :type description: str - :param sequence_number: The sequence number for this health report as a numeric string. - The report sequence number is used by the health store to detect stale reports. - If not specified, a sequence number is auto-generated by the health client when a report is - added. + :param sequence_number: The sequence number for this health report as a + numeric string. + The report sequence number is used by the health store to detect stale + reports. + If not specified, a sequence number is auto-generated by the health client + when a report is added. :type sequence_number: str - :param remove_when_expired: Value that indicates whether the report is removed from health - store when it expires. - If set to true, the report is removed from the health store after it expires. - If set to false, the report is treated as an error when expired. The value of this property is - false by default. - When clients report periodically, they should set RemoveWhenExpired false (default). - This way, if the reporter has issues (e.g. deadlock) and can't report, the entity is evaluated - at error when the health report expires. + :param remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. + If set to true, the report is removed from the health store after it + expires. + If set to false, the report is treated as an error when expired. 
The value + of this property is false by default. + When clients report periodically, they should set RemoveWhenExpired false + (default). + This way, if the reporter has issues (e.g. deadlock) and can't report, the + entity is evaluated at error when the health report expires. This flags the entity as being in Error health state. :type remove_when_expired: bool - :param health_report_id: A health report ID which identifies the health report and can be used - to find more detailed information about a specific health event at - aka.ms/sfhealthid. + :param health_report_id: A health report ID which identifies the health + report and can be used to find more detailed information about a specific + health event at + aka.ms/sfhealthid :type health_report_id: str - :param is_expired: Returns true if the health event is expired, otherwise false. + :param is_expired: Returns true if the health event is expired, otherwise + false. :type is_expired: bool - :param source_utc_timestamp: The date and time when the health report was sent by the source. - :type source_utc_timestamp: ~datetime.datetime - :param last_modified_utc_timestamp: The date and time when the health report was last modified - by the health store. - :type last_modified_utc_timestamp: ~datetime.datetime - :param last_ok_transition_at: If the current health state is 'Ok', this property returns the - time at which the health report was first reported with 'Ok'. - For periodic reporting, many reports with the same state may have been generated. - This property returns the date and time when the first 'Ok' health report was received. - - If the current health state is 'Error' or 'Warning', returns the date and time at which the - health state was last in 'Ok', before transitioning to a different state. - + :param source_utc_timestamp: The date and time when the health report was + sent by the source. 
+ :type source_utc_timestamp: datetime + :param last_modified_utc_timestamp: The date and time when the health + report was last modified by the health store. + :type last_modified_utc_timestamp: datetime + :param last_ok_transition_at: If the current health state is 'Ok', this + property returns the time at which the health report was first reported + with 'Ok'. + For periodic reporting, many reports with the same state may have been + generated. + This property returns the date and time when the first 'Ok' health report + was received. + If the current health state is 'Error' or 'Warning', returns the date and + time at which the health state was last in 'Ok', before transitioning to a + different state. If the health state was never 'Ok', the value will be zero date-time. - :type last_ok_transition_at: ~datetime.datetime - :param last_warning_transition_at: If the current health state is 'Warning', this property - returns the time at which the health report was first reported with 'Warning'. For periodic - reporting, many reports with the same state may have been generated however, this property - returns only the date and time at the first 'Warning' health report was received. - - If the current health state is 'Ok' or 'Error', returns the date and time at which the health - state was last in 'Warning', before transitioning to a different state. - + :type last_ok_transition_at: datetime + :param last_warning_transition_at: If the current health state is + 'Warning', this property returns the time at which the health report was + first reported with 'Warning'. For periodic reporting, many reports with + the same state may have been generated however, this property returns only + the date and time at the first 'Warning' health report was received. + If the current health state is 'Ok' or 'Error', returns the date and time + at which the health state was last in 'Warning', before transitioning to a + different state. 
If the health state was never 'Warning', the value will be zero date-time. - :type last_warning_transition_at: ~datetime.datetime - :param last_error_transition_at: If the current health state is 'Error', this property returns - the time at which the health report was first reported with 'Error'. For periodic reporting, - many reports with the same state may have been generated however, this property returns only - the date and time at the first 'Error' health report was received. - - If the current health state is 'Ok' or 'Warning', returns the date and time at which the - health state was last in 'Error', before transitioning to a different state. - + :type last_warning_transition_at: datetime + :param last_error_transition_at: If the current health state is 'Error', + this property returns the time at which the health report was first + reported with 'Error'. For periodic reporting, many reports with the same + state may have been generated however, this property returns only the date + and time at the first 'Error' health report was received. + If the current health state is 'Ok' or 'Warning', returns the date and + time at which the health state was last in 'Error', before transitioning + to a different state. If the health state was never 'Error', the value will be zero date-time. 
- :type last_error_transition_at: ~datetime.datetime + :type last_error_transition_at: datetime """ _validation = { @@ -12969,25 +10893,7 @@ class HealthEvent(HealthInformation): 'last_error_transition_at': {'key': 'LastErrorTransitionAt', 'type': 'iso-8601'}, } - def __init__( - self, - *, - source_id: str, - property: str, - health_state: Union[str, "HealthState"], - time_to_live_in_milli_seconds: Optional[datetime.timedelta] = None, - description: Optional[str] = None, - sequence_number: Optional[str] = None, - remove_when_expired: Optional[bool] = None, - health_report_id: Optional[str] = None, - is_expired: Optional[bool] = None, - source_utc_timestamp: Optional[datetime.datetime] = None, - last_modified_utc_timestamp: Optional[datetime.datetime] = None, - last_ok_transition_at: Optional[datetime.datetime] = None, - last_warning_transition_at: Optional[datetime.datetime] = None, - last_error_transition_at: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, source_id: str, property: str, health_state, time_to_live_in_milli_seconds=None, description: str=None, sequence_number: str=None, remove_when_expired: bool=None, health_report_id: str=None, is_expired: bool=None, source_utc_timestamp=None, last_modified_utc_timestamp=None, last_ok_transition_at=None, last_warning_transition_at=None, last_error_transition_at=None, **kwargs) -> None: super(HealthEvent, self).__init__(source_id=source_id, property=property, health_state=health_state, time_to_live_in_milli_seconds=time_to_live_in_milli_seconds, description=description, sequence_number=sequence_number, remove_when_expired=remove_when_expired, health_report_id=health_report_id, **kwargs) self.is_expired = is_expired self.source_utc_timestamp = source_utc_timestamp @@ -12997,14 +10903,18 @@ def __init__( self.last_error_transition_at = last_error_transition_at -class HealthStateCount(msrest.serialization.Model): - """Represents information about how many health entities are in Ok, Warning 
and Error health state. +class HealthStateCount(Model): + """Represents information about how many health entities are in Ok, Warning + and Error health state. - :param ok_count: The number of health entities with aggregated health state Ok. + :param ok_count: The number of health entities with aggregated health + state Ok. :type ok_count: long - :param warning_count: The number of health entities with aggregated health state Warning. + :param warning_count: The number of health entities with aggregated health + state Warning. :type warning_count: long - :param error_count: The number of health entities with aggregated health state Error. + :param error_count: The number of health entities with aggregated health + state Error. :type error_count: long """ @@ -13020,54 +10930,49 @@ class HealthStateCount(msrest.serialization.Model): 'error_count': {'key': 'ErrorCount', 'type': 'long'}, } - def __init__( - self, - *, - ok_count: Optional[int] = None, - warning_count: Optional[int] = None, - error_count: Optional[int] = None, - **kwargs - ): + def __init__(self, *, ok_count: int=None, warning_count: int=None, error_count: int=None, **kwargs) -> None: super(HealthStateCount, self).__init__(**kwargs) self.ok_count = ok_count self.warning_count = warning_count self.error_count = error_count -class HealthStatistics(msrest.serialization.Model): - """The health statistics of an entity, returned as part of the health query result when the query description is configured to include statistics. -The statistics include health state counts for all children types of the current entity. -For example, for cluster, the health statistics include health state counts for nodes, applications, services, partitions, replicas, deployed applications and deployed service packages. -For partition, the health statistics include health counts for replicas. 
+class HealthStatistics(Model): + """The health statistics of an entity, returned as part of the health query + result when the query description is configured to include statistics. + The statistics include health state counts for all children types of the + current entity. + For example, for cluster, the health statistics include health state counts + for nodes, applications, services, partitions, replicas, deployed + applications and deployed service packages. + For partition, the health statistics include health counts for replicas. - :param health_state_count_list: List of health state counts per entity kind, which keeps track - of how many children of the queried entity are in Ok, Warning and Error state. - :type health_state_count_list: list[~azure.servicefabric.models.EntityKindHealthStateCount] + :param health_state_count_list: List of health state counts per entity + kind, which keeps track of how many children of the queried entity are in + Ok, Warning and Error state. + :type health_state_count_list: + list[~azure.servicefabric.models.EntityKindHealthStateCount] """ _attribute_map = { 'health_state_count_list': {'key': 'HealthStateCountList', 'type': '[EntityKindHealthStateCount]'}, } - def __init__( - self, - *, - health_state_count_list: Optional[List["EntityKindHealthStateCount"]] = None, - **kwargs - ): + def __init__(self, *, health_state_count_list=None, **kwargs) -> None: super(HealthStatistics, self).__init__(**kwargs) self.health_state_count_list = health_state_count_list -class HttpConfig(msrest.serialization.Model): - """Describes the http configuration for external connectivity for this network. +class HttpConfig(Model): + """Describes the http configuration for external connectivity for this + network. All required parameters must be populated in order to send to Azure. :param name: Required. http gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint below needs to be - exposed. 
+ :param port: Required. Specifies the port at which the service endpoint + below needs to be exposed. :type port: int :param hosts: Required. description for routing. :type hosts: list[~azure.servicefabric.models.HttpHostConfig] @@ -13085,30 +10990,23 @@ class HttpConfig(msrest.serialization.Model): 'hosts': {'key': 'hosts', 'type': '[HttpHostConfig]'}, } - def __init__( - self, - *, - name: str, - port: int, - hosts: List["HttpHostConfig"], - **kwargs - ): + def __init__(self, *, name: str, port: int, hosts, **kwargs) -> None: super(HttpConfig, self).__init__(**kwargs) self.name = name self.port = port self.hosts = hosts -class HttpHostConfig(msrest.serialization.Model): +class HttpHostConfig(Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. :param name: Required. http hostname config name. :type name: str - :param routes: Required. Route information to use for routing. Routes are processed in the - order they are specified. Specify routes that are more specific before routes that can handle - general cases. + :param routes: Required. Route information to use for routing. Routes are + processed in the order they are specified. Specify routes that are more + specific before routes that can handle general cases. :type routes: list[~azure.servicefabric.models.HttpRouteConfig] """ @@ -13122,19 +11020,13 @@ class HttpHostConfig(msrest.serialization.Model): 'routes': {'key': 'routes', 'type': '[HttpRouteConfig]'}, } - def __init__( - self, - *, - name: str, - routes: List["HttpRouteConfig"], - **kwargs - ): + def __init__(self, *, name: str, routes, **kwargs) -> None: super(HttpHostConfig, self).__init__(**kwargs) self.name = name self.routes = routes -class HttpRouteConfig(msrest.serialization.Model): +class HttpRouteConfig(Model): """Describes the hostname properties for http routing. All required parameters must be populated in order to send to Azure. 
@@ -13143,7 +11035,8 @@ class HttpRouteConfig(msrest.serialization.Model): :type name: str :param match: Required. Describes a rule for http route matching. :type match: ~azure.servicefabric.models.HttpRouteMatchRule - :param destination: Required. Describes destination endpoint for routing traffic. + :param destination: Required. Describes destination endpoint for routing + traffic. :type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -13159,21 +11052,14 @@ class HttpRouteConfig(msrest.serialization.Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__( - self, - *, - name: str, - match: "HttpRouteMatchRule", - destination: "GatewayDestination", - **kwargs - ): + def __init__(self, *, name: str, match, destination, **kwargs) -> None: super(HttpRouteConfig, self).__init__(**kwargs) self.name = name self.match = match self.destination = destination -class HttpRouteMatchHeader(msrest.serialization.Model): +class HttpRouteMatchHeader(Model): """Describes header information for http route matching. All required parameters must be populated in order to send to Azure. @@ -13182,7 +11068,7 @@ class HttpRouteMatchHeader(msrest.serialization.Model): :type name: str :param value: Value of header to match in request. :type value: str - :param type: how to match header value. Possible values include: "exact". + :param type: how to match header value. 
Possible values include: 'exact' :type type: str or ~azure.servicefabric.models.HeaderMatchType """ @@ -13196,36 +11082,33 @@ class HttpRouteMatchHeader(msrest.serialization.Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - value: Optional[str] = None, - type: Optional[Union[str, "HeaderMatchType"]] = None, - **kwargs - ): + def __init__(self, *, name: str, value: str=None, type=None, **kwargs) -> None: super(HttpRouteMatchHeader, self).__init__(**kwargs) self.name = name self.value = value self.type = type -class HttpRouteMatchPath(msrest.serialization.Model): +class HttpRouteMatchPath(Model): """Path to match for routing. + Variables are only populated by the server, and will be ignored when + sending a request. + All required parameters must be populated in order to send to Azure. :param value: Required. Uri path to match for request. :type value: str :param rewrite: replacement string for matched part of the Uri. :type rewrite: str - :param type: Required. how to match value in the Uri. Possible values include: "prefix". - :type type: str or ~azure.servicefabric.models.PathMatchType + :ivar type: Required. how to match value in the Uri. Default value: + "prefix" . + :vartype type: str """ _validation = { 'value': {'required': True}, - 'type': {'required': True}, + 'type': {'required': True, 'constant': True}, } _attribute_map = { @@ -13234,21 +11117,15 @@ class HttpRouteMatchPath(msrest.serialization.Model): 'type': {'key': 'type', 'type': 'str'}, } - def __init__( - self, - *, - value: str, - type: Union[str, "PathMatchType"], - rewrite: Optional[str] = None, - **kwargs - ): + type = "prefix" + + def __init__(self, *, value: str, rewrite: str=None, **kwargs) -> None: super(HttpRouteMatchPath, self).__init__(**kwargs) self.value = value self.rewrite = rewrite - self.type = type -class HttpRouteMatchRule(msrest.serialization.Model): +class HttpRouteMatchRule(Model): """Describes a rule for http route matching. 
All required parameters must be populated in order to send to Azure. @@ -13268,35 +11145,32 @@ class HttpRouteMatchRule(msrest.serialization.Model): 'headers': {'key': 'headers', 'type': '[HttpRouteMatchHeader]'}, } - def __init__( - self, - *, - path: "HttpRouteMatchPath", - headers: Optional[List["HttpRouteMatchHeader"]] = None, - **kwargs - ): + def __init__(self, *, path, headers=None, **kwargs) -> None: super(HttpRouteMatchRule, self).__init__(**kwargs) self.path = path self.headers = headers -class IdentityDescription(msrest.serialization.Model): +class IdentityDescription(Model): """Information describing the identities associated with this application. All required parameters must be populated in order to send to Azure. - :param token_service_endpoint: the endpoint for the token service managing this identity. + :param token_service_endpoint: the endpoint for the token service managing + this identity :type token_service_endpoint: str - :param type: Required. the types of identities associated with this resource; currently - restricted to 'SystemAssigned and UserAssigned'. + :param type: Required. the types of identities associated with this + resource; currently restricted to 'SystemAssigned and UserAssigned' :type type: str - :param tenant_id: the identifier of the tenant containing the application's identity. + :param tenant_id: the identifier of the tenant containing the + application's identity. :type tenant_id: str - :param principal_id: the object identifier of the Service Principal of the identity associated - with this resource. + :param principal_id: the object identifier of the Service Principal of the + identity associated with this resource. :type principal_id: str :param user_assigned_identities: represents user assigned identities map. 
- :type user_assigned_identities: dict[str, ~azure.servicefabric.models.IdentityItemDescription] + :type user_assigned_identities: dict[str, + ~azure.servicefabric.models.IdentityItemDescription] """ _validation = { @@ -13311,16 +11185,7 @@ class IdentityDescription(msrest.serialization.Model): 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{IdentityItemDescription}'}, } - def __init__( - self, - *, - type: str, - token_service_endpoint: Optional[str] = None, - tenant_id: Optional[str] = None, - principal_id: Optional[str] = None, - user_assigned_identities: Optional[Dict[str, "IdentityItemDescription"]] = None, - **kwargs - ): + def __init__(self, *, type: str, token_service_endpoint: str=None, tenant_id: str=None, principal_id: str=None, user_assigned_identities=None, **kwargs) -> None: super(IdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = token_service_endpoint self.type = type @@ -13329,14 +11194,14 @@ def __init__( self.user_assigned_identities = user_assigned_identities -class IdentityItemDescription(msrest.serialization.Model): +class IdentityItemDescription(Model): """Describes a single user-assigned identity associated with the application. - :param principal_id: the object identifier of the Service Principal which this identity - represents. + :param principal_id: the object identifier of the Service Principal which + this identity represents. :type principal_id: str - :param client_id: the client identifier of the Service Principal which this identity - represents. + :param client_id: the client identifier of the Service Principal which + this identity represents. 
:type client_id: str """ @@ -13345,35 +11210,30 @@ class IdentityItemDescription(msrest.serialization.Model): 'client_id': {'key': 'clientId', 'type': 'str'}, } - def __init__( - self, - *, - principal_id: Optional[str] = None, - client_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, principal_id: str=None, client_id: str=None, **kwargs) -> None: super(IdentityItemDescription, self).__init__(**kwargs) self.principal_id = principal_id self.client_id = client_id -class ImageRegistryCredential(msrest.serialization.Model): +class ImageRegistryCredential(Model): """Image registry credential. All required parameters must be populated in order to send to Azure. - :param server: Required. Docker image registry server, without protocol such as ``http`` and - ``https``. + :param server: Required. Docker image registry server, without protocol + such as `http` and `https`. :type server: str :param username: Required. The username for the private registry. :type username: str - :param password_type: The type of the image registry password being given in password. Possible - values include: "ClearText", "KeyVaultReference", "SecretValueReference". Default value: - "ClearText". - :type password_type: str or ~azure.servicefabric.models.ImageRegistryPasswordType - :param password: The password for the private registry. The password is required for create or - update operations, however it is not returned in the get or list operations. Will be processed - based on the type provided. + :param password_type: The type of the image registry password being given + in password. Possible values include: 'ClearText', 'KeyVaultReference', + 'SecretValueReference'. Default value: "ClearText" . + :type password_type: str or + ~azure.servicefabric.models.ImageRegistryPasswordType + :param password: The password for the private registry. The password is + required for create or update operations, however it is not returned in + the get or list operations. 
Will be processed based on the type provided. :type password: str """ @@ -13389,15 +11249,7 @@ class ImageRegistryCredential(msrest.serialization.Model): 'password': {'key': 'password', 'type': 'str'}, } - def __init__( - self, - *, - server: str, - username: str, - password_type: Optional[Union[str, "ImageRegistryPasswordType"]] = "ClearText", - password: Optional[str] = None, - **kwargs - ): + def __init__(self, *, server: str, username: str, password_type="ClearText", password: str=None, **kwargs) -> None: super(ImageRegistryCredential, self).__init__(**kwargs) self.server = server self.username = username @@ -13405,14 +11257,14 @@ def __init__( self.password = password -class ImageStoreContent(msrest.serialization.Model): +class ImageStoreContent(Model): """Information about the image store content. - :param store_files: The list of image store file info objects represents files found under the - given image store relative path. + :param store_files: The list of image store file info objects represents + files found under the given image store relative path. :type store_files: list[~azure.servicefabric.models.FileInfo] - :param store_folders: The list of image store folder info objects represents subfolders found - under the given image store relative path. + :param store_folders: The list of image store folder info objects + represents subfolders found under the given image store relative path. 
:type store_folders: list[~azure.servicefabric.models.FolderInfo] """ @@ -13421,35 +11273,31 @@ class ImageStoreContent(msrest.serialization.Model): 'store_folders': {'key': 'StoreFolders', 'type': '[FolderInfo]'}, } - def __init__( - self, - *, - store_files: Optional[List["FileInfo"]] = None, - store_folders: Optional[List["FolderInfo"]] = None, - **kwargs - ): + def __init__(self, *, store_files=None, store_folders=None, **kwargs) -> None: super(ImageStoreContent, self).__init__(**kwargs) self.store_files = store_files self.store_folders = store_folders -class ImageStoreCopyDescription(msrest.serialization.Model): - """Information about how to copy image store content from one image store relative path to another image store relative path. +class ImageStoreCopyDescription(Model): + """Information about how to copy image store content from one image store + relative path to another image store relative path. All required parameters must be populated in order to send to Azure. - :param remote_source: Required. The relative path of source image store content to be copied - from. + :param remote_source: Required. The relative path of source image store + content to be copied from. :type remote_source: str - :param remote_destination: Required. The relative path of destination image store content to be - copied to. + :param remote_destination: Required. The relative path of destination + image store content to be copied to. :type remote_destination: str :param skip_files: The list of the file names to be skipped for copying. :type skip_files: list[str] - :param check_mark_file: Indicates whether to check mark file during copying. The property is - true if checking mark file is required, false otherwise. The mark file is used to check whether - the folder is well constructed. If the property is true and mark file does not exist, the copy - is skipped. + :param check_mark_file: Indicates whether to check mark file during + copying. 
The property is true if checking mark file is required, false + otherwise. The mark file is used to check whether the folder is well + constructed. If the property is true and mark file does not exist, the + copy is skipped. :type check_mark_file: bool """ @@ -13465,15 +11313,7 @@ class ImageStoreCopyDescription(msrest.serialization.Model): 'check_mark_file': {'key': 'CheckMarkFile', 'type': 'bool'}, } - def __init__( - self, - *, - remote_source: str, - remote_destination: str, - skip_files: Optional[List[str]] = None, - check_mark_file: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, remote_source: str, remote_destination: str, skip_files=None, check_mark_file: bool=None, **kwargs) -> None: super(ImageStoreCopyDescription, self).__init__(**kwargs) self.remote_source = remote_source self.remote_destination = remote_destination @@ -13481,27 +11321,27 @@ def __init__( self.check_mark_file = check_mark_file -class ImageStoreInfo(msrest.serialization.Model): +class ImageStoreInfo(Model): """Information about the ImageStore's resource usage. - :param disk_info: disk capacity and available disk space on the node where the ImageStore - primary is placed. + :param disk_info: disk capacity and available disk space on the node where + the ImageStore primary is placed. :type disk_info: ~azure.servicefabric.models.DiskInfo :param used_by_metadata: the ImageStore's file system usage for metadata. :type used_by_metadata: ~azure.servicefabric.models.UsageInfo - :param used_by_staging: The ImageStore's file system usage for staging files that are being - uploaded. + :param used_by_staging: The ImageStore's file system usage for staging + files that are being uploaded. :type used_by_staging: ~azure.servicefabric.models.UsageInfo - :param used_by_copy: the ImageStore's file system usage for copied application and cluster - packages. `Removing application and cluster packages - `_ will - free up this space. 
+ :param used_by_copy: the ImageStore's file system usage for copied + application and cluster packages. [Removing application and cluster + packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-deleteimagestorecontent) + will free up this space. :type used_by_copy: ~azure.servicefabric.models.UsageInfo - :param used_by_register: the ImageStore's file system usage for registered and cluster - packages. `Unregistering application - `_ - and `cluster packages - `_ + :param used_by_register: the ImageStore's file system usage for registered + and cluster packages. [Unregistering + application](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) + and [cluster + packages](https://docs.microsoft.com/rest/api/servicefabric/sfclient-api-unprovisionapplicationtype) will free up this space. :type used_by_register: ~azure.servicefabric.models.UsageInfo """ @@ -13514,16 +11354,7 @@ class ImageStoreInfo(msrest.serialization.Model): 'used_by_register': {'key': 'UsedByRegister', 'type': 'UsageInfo'}, } - def __init__( - self, - *, - disk_info: Optional["DiskInfo"] = None, - used_by_metadata: Optional["UsageInfo"] = None, - used_by_staging: Optional["UsageInfo"] = None, - used_by_copy: Optional["UsageInfo"] = None, - used_by_register: Optional["UsageInfo"] = None, - **kwargs - ): + def __init__(self, *, disk_info=None, used_by_metadata=None, used_by_staging=None, used_by_copy=None, used_by_register=None, **kwargs) -> None: super(ImageStoreInfo, self).__init__(**kwargs) self.disk_info = disk_info self.used_by_metadata = used_by_metadata @@ -13532,17 +11363,17 @@ def __init__( self.used_by_register = used_by_register -class SecretResourcePropertiesBase(msrest.serialization.Model): - """This type describes the properties of a secret resource, including its kind. +class SecretResourcePropertiesBase(Model): + """This type describes the properties of a secret resource, including its + kind. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecretResourceProperties. + sub-classes are: SecretResourceProperties All required parameters must be populated in order to send to Azure. - :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values - include: "inlinedValue", "keyVaultVersionedReference". - :type kind: str or ~azure.servicefabric.models.SecretKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -13557,36 +11388,35 @@ class SecretResourcePropertiesBase(msrest.serialization.Model): 'kind': {'SecretResourceProperties': 'SecretResourceProperties'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(SecretResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class SecretResourceProperties(SecretResourcePropertiesBase): """Describes the properties of a secret resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: InlinedValueSecretResourceProperties. + sub-classes are: InlinedValueSecretResourceProperties - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values - include: "inlinedValue", "keyVaultVersionedReference". - :type kind: str or ~azure.servicefabric.models.SecretKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the resource. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the secret. + :ivar status_details: Gives additional information about the current + status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. The value of this - property is opaque to Service Fabric. Once set, the value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. + The value of this property is opaque to Service Fabric. Once set, the + value of this property cannot be changed. :type content_type: str """ @@ -13608,40 +11438,40 @@ class SecretResourceProperties(SecretResourcePropertiesBase): 'kind': {'inlinedValue': 'InlinedValueSecretResourceProperties'} } - def __init__( - self, - *, - description: Optional[str] = None, - content_type: Optional[str] = None, - **kwargs - ): + def __init__(self, *, description: str=None, content_type: str=None, **kwargs) -> None: super(SecretResourceProperties, self).__init__(**kwargs) - self.kind = 'SecretResourceProperties' # type: str self.description = description self.status = None self.status_details = None self.content_type = content_type + self.kind = 'SecretResourceProperties' class InlinedValueSecretResourceProperties(SecretResourceProperties): - """Describes the properties of a secret resource whose value is provided explicitly as plaintext. The secret resource may have multiple values, each being uniquely versioned. The secret value of each version is stored encrypted, and delivered as plaintext into the context of applications referencing it. + """Describes the properties of a secret resource whose value is provided + explicitly as plaintext. The secret resource may have multiple values, each + being uniquely versioned. 
The secret value of each version is stored + encrypted, and delivered as plaintext into the context of applications + referencing it. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. Describes the kind of secret.Constant filled by server. Possible values - include: "inlinedValue", "keyVaultVersionedReference". - :type kind: str or ~azure.servicefabric.models.SecretKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the secret. :type description: str - :ivar status: Status of the resource. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the resource. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the secret. + :ivar status_details: Gives additional information about the current + status of the secret. :vartype status_details: str - :param content_type: The type of the content stored in the secret value. The value of this - property is opaque to Service Fabric. Once set, the value of this property cannot be changed. + :param content_type: The type of the content stored in the secret value. + The value of this property is opaque to Service Fabric. Once set, the + value of this property cannot be changed. 
:type content_type: str """ @@ -13659,22 +11489,16 @@ class InlinedValueSecretResourceProperties(SecretResourceProperties): 'content_type': {'key': 'contentType', 'type': 'str'}, } - def __init__( - self, - *, - description: Optional[str] = None, - content_type: Optional[str] = None, - **kwargs - ): + def __init__(self, *, description: str=None, content_type: str=None, **kwargs) -> None: super(InlinedValueSecretResourceProperties, self).__init__(description=description, content_type=content_type, **kwargs) - self.kind = 'inlinedValue' # type: str + self.kind = 'inlinedValue' -class InstanceLifecycleDescription(msrest.serialization.Model): +class InstanceLifecycleDescription(Model): """Describes how the instance will behave. - :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original - location after upgrade. + :param restore_replica_location_after_upgrade: If set to true, move/swap + replica to original location after upgrade. :type restore_replica_location_after_upgrade: bool """ @@ -13682,12 +11506,7 @@ class InstanceLifecycleDescription(msrest.serialization.Model): 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, } - def __init__( - self, - *, - restore_replica_location_after_upgrade: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, restore_replica_location_after_upgrade: bool=None, **kwargs) -> None: super(InstanceLifecycleDescription, self).__init__(**kwargs) self.restore_replica_location_after_upgrade = restore_replica_location_after_upgrade @@ -13697,10 +11516,8 @@ class Int64PropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". 
- :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. + :type kind: str :param data: Required. The data of the property value. :type data: str """ @@ -13715,34 +11532,30 @@ class Int64PropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__( - self, - *, - data: str, - **kwargs - ): + def __init__(self, *, data: str, **kwargs) -> None: super(Int64PropertyValue, self).__init__(**kwargs) - self.kind = 'Int64' # type: str self.data = data + self.kind = 'Int64' -class PartitionInformation(msrest.serialization.Model): - """Information about the partition identity, partitioning scheme and keys supported by it. +class PartitionInformation(Model): + """Information about the partition identity, partitioning scheme and keys + supported by it. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, SingletonPartitionInformation. + sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, + SingletonPartitionInformation All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. 
+ The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str """ _validation = { @@ -13750,42 +11563,38 @@ class PartitionInformation(msrest.serialization.Model): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, } _subtype_map = { 'service_partition_kind': {'Int64Range': 'Int64RangePartitionInformation', 'Named': 'NamedPartitionInformation', 'Singleton': 'SingletonPartitionInformation'} } - def __init__( - self, - *, - id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, **kwargs) -> None: super(PartitionInformation, self).__init__(**kwargs) - self.service_partition_kind = None # type: Optional[str] self.id = id + self.service_partition_kind = None class Int64RangePartitionInformation(PartitionInformation): - """Describes the partition information for the integer range that is based on partition schemes. + """Describes the partition information for the integer range that is based on + partition schemes. All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. 
If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str :param low_key: Specifies the minimum key value handled by this partition. :type low_key: str - :param high_key: Specifies the maximum key value handled by this partition. + :param high_key: Specifies the maximum key value handled by this + partition. :type high_key: str """ @@ -13794,34 +11603,28 @@ class Int64RangePartitionInformation(PartitionInformation): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'low_key': {'key': 'LowKey', 'type': 'str'}, 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - low_key: Optional[str] = None, - high_key: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, low_key: str=None, high_key: str=None, **kwargs) -> None: super(Int64RangePartitionInformation, self).__init__(id=id, **kwargs) - self.service_partition_kind = 'Int64Range' # type: str self.low_key = low_key self.high_key = high_key + self.service_partition_kind = 'Int64Range' -class InvokeDataLossResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class InvokeDataLossResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). 
- :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the partition that the - user-induced operation acted upon. + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. :type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -13830,26 +11633,21 @@ class InvokeDataLossResult(msrest.serialization.Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__( - self, - *, - error_code: Optional[int] = None, - selected_partition: Optional["SelectedPartition"] = None, - **kwargs - ): + def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: super(InvokeDataLossResult, self).__init__(**kwargs) self.error_code = error_code self.selected_partition = selected_partition -class InvokeQuorumLossResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class InvokeQuorumLossResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the partition that the - user-induced operation acted upon. + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -13858,29 +11656,22 @@ class InvokeQuorumLossResult(msrest.serialization.Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__( - self, - *, - error_code: Optional[int] = None, - selected_partition: Optional["SelectedPartition"] = None, - **kwargs - ): + def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: super(InvokeQuorumLossResult, self).__init__(**kwargs) self.error_code = error_code self.selected_partition = selected_partition -class ReplicaStatusBase(msrest.serialization.Model): +class ReplicaStatusBase(Model): """Information about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: KeyValueStoreReplicaStatus. + sub-classes are: KeyValueStoreReplicaStatus All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Invalid", "KeyValueStore". - :type kind: str or ~azure.servicefabric.models.ReplicaKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -13895,12 +11686,9 @@ class ReplicaStatusBase(msrest.serialization.Model): 'kind': {'KeyValueStore': 'KeyValueStoreReplicaStatus'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ReplicaStatusBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class KeyValueStoreReplicaStatus(ReplicaStatusBase): @@ -13908,22 +11696,24 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Invalid", "KeyValueStore". 
- :type kind: str or ~azure.servicefabric.models.ReplicaKind - :param database_row_count_estimate: Value indicating the estimated number of rows in the - underlying database. + :param kind: Required. Constant filled by server. + :type kind: str + :param database_row_count_estimate: Value indicating the estimated number + of rows in the underlying database. :type database_row_count_estimate: str - :param database_logical_size_estimate: Value indicating the estimated size of the underlying - database. + :param database_logical_size_estimate: Value indicating the estimated size + of the underlying database. :type database_logical_size_estimate: str - :param copy_notification_current_key_filter: Value indicating the latest key-prefix filter - applied to enumeration during the callback. Null if there is no pending callback. + :param copy_notification_current_key_filter: Value indicating the latest + key-prefix filter applied to enumeration during the callback. Null if + there is no pending callback. :type copy_notification_current_key_filter: str - :param copy_notification_current_progress: Value indicating the latest number of keys - enumerated during the callback. 0 if there is no pending callback. + :param copy_notification_current_progress: Value indicating the latest + number of keys enumerated during the callback. 0 if there is no pending + callback. :type copy_notification_current_progress: str - :param status_details: Value indicating the current status details of the replica. + :param status_details: Value indicating the current status details of the + replica. 
:type status_details: str """ @@ -13940,45 +11730,41 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__( - self, - *, - database_row_count_estimate: Optional[str] = None, - database_logical_size_estimate: Optional[str] = None, - copy_notification_current_key_filter: Optional[str] = None, - copy_notification_current_progress: Optional[str] = None, - status_details: Optional[str] = None, - **kwargs - ): + def __init__(self, *, database_row_count_estimate: str=None, database_logical_size_estimate: str=None, copy_notification_current_key_filter: str=None, copy_notification_current_progress: str=None, status_details: str=None, **kwargs) -> None: super(KeyValueStoreReplicaStatus, self).__init__(**kwargs) - self.kind = 'KeyValueStore' # type: str self.database_row_count_estimate = database_row_count_estimate self.database_logical_size_estimate = database_logical_size_estimate self.copy_notification_current_key_filter = copy_notification_current_key_filter self.copy_notification_current_progress = copy_notification_current_progress self.status_details = status_details + self.kind = 'KeyValueStore' -class LoadedPartitionInformationQueryDescription(msrest.serialization.Model): +class LoadedPartitionInformationQueryDescription(Model): """Represents data structure that contains query information. - :param metric_name: Name of the metric for which this information is provided. + :param metric_name: Name of the metric for which this information is + provided. :type metric_name: str :param service_name: Name of the service this partition belongs to. :type service_name: str - :param ordering: Ordering of partitions' load. Possible values include: "Desc", "Asc". + :param ordering: Ordering of partitions' load. Possible values include: + 'Desc', 'Asc'. Default value: "Desc" . 
:type ordering: str or ~azure.servicefabric.models.Ordering - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. + :param max_results: The maximum number of results to be returned as part + of the paged queries. This parameter defines the upper bound on the number + of results returned. The results returned can be less than the specified + maximum results if they do not fit in the message as per the max message + size restrictions defined in the configuration. If this parameter is zero + or not specified, the paged query includes as many results as possible + that fit in the return message. :type max_results: long - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. 
:type continuation_token: str """ @@ -13990,16 +11776,7 @@ class LoadedPartitionInformationQueryDescription(msrest.serialization.Model): 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, } - def __init__( - self, - *, - metric_name: Optional[str] = None, - service_name: Optional[str] = None, - ordering: Optional[Union[str, "Ordering"]] = None, - max_results: Optional[int] = None, - continuation_token: Optional[str] = None, - **kwargs - ): + def __init__(self, *, metric_name: str=None, service_name: str=None, ordering="Desc", max_results: int=None, continuation_token: str=None, **kwargs) -> None: super(LoadedPartitionInformationQueryDescription, self).__init__(**kwargs) self.metric_name = metric_name self.service_name = service_name @@ -14008,16 +11785,18 @@ def __init__( self.continuation_token = continuation_token -class LoadedPartitionInformationResult(msrest.serialization.Model): +class LoadedPartitionInformationResult(Model): """Represents partition information. All required parameters must be populated in order to send to Azure. - :param service_name: Required. Name of the service this partition belongs to. + :param service_name: Required. Name of the service this partition belongs + to. :type service_name: str :param partition_id: Required. Id of the partition. :type partition_id: str - :param metric_name: Required. Name of the metric for which this information is provided. + :param metric_name: Required. Name of the metric for which this + information is provided. :type metric_name: str :param load: Required. Load for metric. 
:type load: long @@ -14037,15 +11816,7 @@ class LoadedPartitionInformationResult(msrest.serialization.Model): 'load': {'key': 'Load', 'type': 'long'}, } - def __init__( - self, - *, - service_name: str, - partition_id: str, - metric_name: str, - load: int, - **kwargs - ): + def __init__(self, *, service_name: str, partition_id: str, metric_name: str, load: int, **kwargs) -> None: super(LoadedPartitionInformationResult, self).__init__(**kwargs) self.service_name = service_name self.partition_id = partition_id @@ -14053,17 +11824,20 @@ def __init__( self.load = load -class LoadedPartitionInformationResultList(msrest.serialization.Model): - """Represents data structure that contains top/least loaded partitions for a certain metric. +class LoadedPartitionInformationResultList(Model): + """Represents data structure that contains top/least loaded partitions for a + certain metric. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of application information. 
- :type items: list[~azure.servicefabric.models.LoadedPartitionInformationResult] + :type items: + list[~azure.servicefabric.models.LoadedPartitionInformationResult] """ _attribute_map = { @@ -14071,88 +11845,91 @@ class LoadedPartitionInformationResultList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[LoadedPartitionInformationResult]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["LoadedPartitionInformationResult"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(LoadedPartitionInformationResultList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class LoadMetricInformation(msrest.serialization.Model): - """Represents data structure that contains load information for a certain metric in a cluster. +class LoadMetricInformation(Model): + """Represents data structure that contains load information for a certain + metric in a cluster. - :param name: Name of the metric for which this load information is provided. + :param name: Name of the metric for which this load information is + provided. :type name: str - :param is_balanced_before: Value that indicates whether the metrics is balanced or not before - resource balancer run. + :param is_balanced_before: Value that indicates whether the metrics is + balanced or not before resource balancer run :type is_balanced_before: bool - :param is_balanced_after: Value that indicates whether the metrics is balanced or not after - resource balancer run. + :param is_balanced_after: Value that indicates whether the metrics is + balanced or not after resource balancer run. :type is_balanced_after: bool - :param deviation_before: The standard average deviation of the metrics before resource balancer - run. + :param deviation_before: The standard average deviation of the metrics + before resource balancer run. 
:type deviation_before: str - :param deviation_after: The standard average deviation of the metrics after resource balancer - run. + :param deviation_after: The standard average deviation of the metrics + after resource balancer run. :type deviation_after: str :param balancing_threshold: The balancing threshold for a certain metric. :type balancing_threshold: str - :param action: The current action being taken with regard to this metric. + :param action: The current action being taken with regard to this metric :type action: str - :param activity_threshold: The Activity Threshold specified for this metric in the system - Cluster Manifest. + :param activity_threshold: The Activity Threshold specified for this + metric in the system Cluster Manifest. :type activity_threshold: str - :param cluster_capacity: The total cluster capacity for a given metric. + :param cluster_capacity: The total cluster capacity for a given metric :type cluster_capacity: str - :param cluster_load: The total cluster load. In future releases of Service Fabric this - parameter will be deprecated in favor of CurrentClusterLoad. + :param cluster_load: The total cluster load. In future releases of Service + Fabric this parameter will be deprecated in favor of CurrentClusterLoad. :type cluster_load: str :param current_cluster_load: The total cluster load. :type current_cluster_load: str - :param cluster_remaining_capacity: The remaining capacity for the metric in the cluster. In - future releases of Service Fabric this parameter will be deprecated in favor of - ClusterCapacityRemaining. + :param cluster_remaining_capacity: The remaining capacity for the metric + in the cluster. In future releases of Service Fabric this parameter will + be deprecated in favor of ClusterCapacityRemaining. :type cluster_remaining_capacity: str - :param cluster_capacity_remaining: The remaining capacity for the metric in the cluster. 
+ :param cluster_capacity_remaining: The remaining capacity for the metric + in the cluster. :type cluster_capacity_remaining: str - :param is_cluster_capacity_violation: Indicates that the metric is currently over capacity in - the cluster. + :param is_cluster_capacity_violation: Indicates that the metric is + currently over capacity in the cluster. :type is_cluster_capacity_violation: bool - :param node_buffer_percentage: The reserved percentage of total node capacity for this metric. + :param node_buffer_percentage: The reserved percentage of total node + capacity for this metric. :type node_buffer_percentage: str - :param cluster_buffered_capacity: Remaining capacity in the cluster excluding the reserved - space. In future releases of Service Fabric this parameter will be deprecated in favor of - BufferedClusterCapacityRemaining. + :param cluster_buffered_capacity: Remaining capacity in the cluster + excluding the reserved space. In future releases of Service Fabric this + parameter will be deprecated in favor of BufferedClusterCapacityRemaining. :type cluster_buffered_capacity: str - :param buffered_cluster_capacity_remaining: Remaining capacity in the cluster excluding the - reserved space. + :param buffered_cluster_capacity_remaining: Remaining capacity in the + cluster excluding the reserved space. :type buffered_cluster_capacity_remaining: str - :param cluster_remaining_buffered_capacity: The remaining percentage of cluster total capacity - for this metric. + :param cluster_remaining_buffered_capacity: The remaining percentage of + cluster total capacity for this metric. :type cluster_remaining_buffered_capacity: str - :param min_node_load_value: The minimum load on any node for this metric. In future releases of - Service Fabric this parameter will be deprecated in favor of MinimumNodeLoad. + :param min_node_load_value: The minimum load on any node for this metric. 
+ In future releases of Service Fabric this parameter will be deprecated in + favor of MinimumNodeLoad. :type min_node_load_value: str :param minimum_node_load: The minimum load on any node for this metric. :type minimum_node_load: str - :param min_node_load_node_id: The node id of the node with the minimum load for this metric. + :param min_node_load_node_id: The node id of the node with the minimum + load for this metric. :type min_node_load_node_id: ~azure.servicefabric.models.NodeId - :param max_node_load_value: The maximum load on any node for this metric. In future releases of - Service Fabric this parameter will be deprecated in favor of MaximumNodeLoad. + :param max_node_load_value: The maximum load on any node for this metric. + In future releases of Service Fabric this parameter will be deprecated in + favor of MaximumNodeLoad. :type max_node_load_value: str :param maximum_node_load: The maximum load on any node for this metric. :type maximum_node_load: str - :param max_node_load_node_id: The node id of the node with the maximum load for this metric. + :param max_node_load_node_id: The node id of the node with the maximum + load for this metric. :type max_node_load_node_id: ~azure.servicefabric.models.NodeId - :param planned_load_removal: This value represents the load of the replicas that are planned to - be removed in the future within the cluster. - This kind of load is reported for replicas that are currently being moving to other nodes and - for replicas that are currently being dropped but still use the load on the source node. + :param planned_load_removal: This value represents the load of the + replicas that are planned to be removed in the future within the cluster. + This kind of load is reported for replicas that are currently being moving + to other nodes and for replicas that are currently being dropped but still + use the load on the source node. 
:type planned_load_removal: str """ @@ -14184,36 +11961,7 @@ class LoadMetricInformation(msrest.serialization.Model): 'planned_load_removal': {'key': 'PlannedLoadRemoval', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - is_balanced_before: Optional[bool] = None, - is_balanced_after: Optional[bool] = None, - deviation_before: Optional[str] = None, - deviation_after: Optional[str] = None, - balancing_threshold: Optional[str] = None, - action: Optional[str] = None, - activity_threshold: Optional[str] = None, - cluster_capacity: Optional[str] = None, - cluster_load: Optional[str] = None, - current_cluster_load: Optional[str] = None, - cluster_remaining_capacity: Optional[str] = None, - cluster_capacity_remaining: Optional[str] = None, - is_cluster_capacity_violation: Optional[bool] = None, - node_buffer_percentage: Optional[str] = None, - cluster_buffered_capacity: Optional[str] = None, - buffered_cluster_capacity_remaining: Optional[str] = None, - cluster_remaining_buffered_capacity: Optional[str] = None, - min_node_load_value: Optional[str] = None, - minimum_node_load: Optional[str] = None, - min_node_load_node_id: Optional["NodeId"] = None, - max_node_load_value: Optional[str] = None, - maximum_node_load: Optional[str] = None, - max_node_load_node_id: Optional["NodeId"] = None, - planned_load_removal: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, is_balanced_before: bool=None, is_balanced_after: bool=None, deviation_before: str=None, deviation_after: str=None, balancing_threshold: str=None, action: str=None, activity_threshold: str=None, cluster_capacity: str=None, cluster_load: str=None, current_cluster_load: str=None, cluster_remaining_capacity: str=None, cluster_capacity_remaining: str=None, is_cluster_capacity_violation: bool=None, node_buffer_percentage: str=None, cluster_buffered_capacity: str=None, buffered_cluster_capacity_remaining: str=None, cluster_remaining_buffered_capacity: str=None, 
min_node_load_value: str=None, minimum_node_load: str=None, min_node_load_node_id=None, max_node_load_value: str=None, maximum_node_load: str=None, max_node_load_node_id=None, planned_load_removal: str=None, **kwargs) -> None: super(LoadMetricInformation, self).__init__(**kwargs) self.name = name self.is_balanced_before = is_balanced_before @@ -14242,15 +11990,16 @@ def __init__( self.planned_load_removal = planned_load_removal -class LoadMetricReport(msrest.serialization.Model): - """Represents the load metric report which contains the time metric was reported, its name and value. +class LoadMetricReport(Model): + """Represents the load metric report which contains the time metric was + reported, its name and value. :param last_reported_utc: Gets the UTC time when the load was reported. - :type last_reported_utc: ~datetime.datetime + :type last_reported_utc: datetime :param name: The name of the load metric. :type name: str - :param value: The value of the load metric. In future releases of Service Fabric this parameter - will be deprecated in favor of CurrentValue. + :param value: The value of the load metric. In future releases of Service + Fabric this parameter will be deprecated in favor of CurrentValue. :type value: str :param current_value: The value of the load metric. 
:type current_value: str @@ -14263,15 +12012,7 @@ class LoadMetricReport(msrest.serialization.Model): 'current_value': {'key': 'CurrentValue', 'type': 'str'}, } - def __init__( - self, - *, - last_reported_utc: Optional[datetime.datetime] = None, - name: Optional[str] = None, - value: Optional[str] = None, - current_value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, last_reported_utc=None, name: str=None, value: str=None, current_value: str=None, **kwargs) -> None: super(LoadMetricReport, self).__init__(**kwargs) self.last_reported_utc = last_reported_utc self.name = name @@ -14279,18 +12020,18 @@ def __init__( self.current_value = current_value -class LoadMetricReportInfo(msrest.serialization.Model): +class LoadMetricReportInfo(Model): """Information about load reported by replica. :param name: The name of the metric. :type name: str - :param value: The value of the load for the metric. In future releases of Service Fabric this - parameter will be deprecated in favor of CurrentValue. + :param value: The value of the load for the metric. In future releases of + Service Fabric this parameter will be deprecated in favor of CurrentValue. :type value: int :param current_value: The double value of the load for the metric. :type current_value: str :param last_reported_utc: The UTC time when the load is reported. 
- :type last_reported_utc: ~datetime.datetime + :type last_reported_utc: datetime """ _attribute_map = { @@ -14300,15 +12041,7 @@ class LoadMetricReportInfo(msrest.serialization.Model): 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, } - def __init__( - self, - *, - name: Optional[str] = None, - value: Optional[int] = None, - current_value: Optional[str] = None, - last_reported_utc: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, name: str=None, value: int=None, current_value: str=None, last_reported_utc=None, **kwargs) -> None: super(LoadMetricReportInfo, self).__init__(**kwargs) self.name = name self.value = value @@ -14316,17 +12049,17 @@ def __init__( self.last_reported_utc = last_reported_utc -class NetworkResourcePropertiesBase(msrest.serialization.Model): - """This type describes the properties of a network resource, including its kind. +class NetworkResourcePropertiesBase(Model): + """This type describes the properties of a network resource, including its + kind. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NetworkResourceProperties. + sub-classes are: NetworkResourceProperties All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of a Service Fabric container network.Constant filled by - server. Possible values include: "Local". - :type kind: str or ~azure.servicefabric.models.NetworkKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -14341,33 +12074,31 @@ class NetworkResourcePropertiesBase(msrest.serialization.Model): 'kind': {'NetworkResourceProperties': 'NetworkResourceProperties'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(NetworkResourcePropertiesBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class NetworkResourceProperties(NetworkResourcePropertiesBase): """Describes properties of a network resource. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LocalNetworkResourceProperties. + sub-classes are: LocalNetworkResourceProperties - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of a Service Fabric container network.Constant filled by - server. Possible values include: "Local". - :type kind: str or ~azure.servicefabric.models.NetworkKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the network. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the network. + :ivar status_details: Gives additional information about the current + status of the network. 
:vartype status_details: str """ @@ -14388,37 +12119,35 @@ class NetworkResourceProperties(NetworkResourcePropertiesBase): 'kind': {'Local': 'LocalNetworkResourceProperties'} } - def __init__( - self, - *, - description: Optional[str] = None, - **kwargs - ): + def __init__(self, *, description: str=None, **kwargs) -> None: super(NetworkResourceProperties, self).__init__(**kwargs) - self.kind = 'NetworkResourceProperties' # type: str self.description = description self.status = None self.status_details = None + self.kind = 'NetworkResourceProperties' class LocalNetworkResourceProperties(NetworkResourceProperties): - """Information about a Service Fabric container network local to a single Service Fabric cluster. + """Information about a Service Fabric container network local to a single + Service Fabric cluster. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. - :param kind: Required. The type of a Service Fabric container network.Constant filled by - server. Possible values include: "Local". - :type kind: str or ~azure.servicefabric.models.NetworkKind + :param kind: Required. Constant filled by server. + :type kind: str :param description: User readable description of the network. :type description: str - :ivar status: Status of the network. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the network. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the network. + :ivar status_details: Gives additional information about the current + status of the network. 
:vartype status_details: str - :param network_address_prefix: Address space for the local container network. + :param network_address_prefix: Address space for the local container + network. :type network_address_prefix: str """ @@ -14436,19 +12165,13 @@ class LocalNetworkResourceProperties(NetworkResourceProperties): 'network_address_prefix': {'key': 'networkAddressPrefix', 'type': 'str'}, } - def __init__( - self, - *, - description: Optional[str] = None, - network_address_prefix: Optional[str] = None, - **kwargs - ): + def __init__(self, *, description: str=None, network_address_prefix: str=None, **kwargs) -> None: super(LocalNetworkResourceProperties, self).__init__(description=description, **kwargs) - self.kind = 'Local' # type: str self.network_address_prefix = network_address_prefix + self.kind = 'Local' -class ManagedApplicationIdentity(msrest.serialization.Model): +class ManagedApplicationIdentity(Model): """Describes a managed application identity. All required parameters must be populated in order to send to Azure. @@ -14468,25 +12191,20 @@ class ManagedApplicationIdentity(msrest.serialization.Model): 'principal_id': {'key': 'PrincipalId', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - principal_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str, principal_id: str=None, **kwargs) -> None: super(ManagedApplicationIdentity, self).__init__(**kwargs) self.name = name self.principal_id = principal_id -class ManagedApplicationIdentityDescription(msrest.serialization.Model): +class ManagedApplicationIdentityDescription(Model): """Managed application identity description. :param token_service_endpoint: Token service endpoint. :type token_service_endpoint: str :param managed_identities: A list of managed application identity objects. 
- :type managed_identities: list[~azure.servicefabric.models.ManagedApplicationIdentity] + :type managed_identities: + list[~azure.servicefabric.models.ManagedApplicationIdentity] """ _attribute_map = { @@ -14494,36 +12212,32 @@ class ManagedApplicationIdentityDescription(msrest.serialization.Model): 'managed_identities': {'key': 'ManagedIdentities', 'type': '[ManagedApplicationIdentity]'}, } - def __init__( - self, - *, - token_service_endpoint: Optional[str] = None, - managed_identities: Optional[List["ManagedApplicationIdentity"]] = None, - **kwargs - ): + def __init__(self, *, token_service_endpoint: str=None, managed_identities=None, **kwargs) -> None: super(ManagedApplicationIdentityDescription, self).__init__(**kwargs) self.token_service_endpoint = token_service_endpoint self.managed_identities = managed_identities class ManagedIdentityAzureBlobBackupStorageDescription(BackupStorageDescription): - """Describes the parameters for Azure blob store (connected using managed identity) used for storing and enumerating backups. + """Describes the parameters for Azure blob store (connected using managed + identity) used for storing and enumerating backups. All required parameters must be populated in order to send to Azure. - :param storage_kind: Required. The kind of backup storage, where backups are saved.Constant - filled by server. Possible values include: "Invalid", "FileShare", "AzureBlobStore", - "DsmsAzureBlobStore", "ManagedIdentityAzureBlobStore". - :type storage_kind: str or ~azure.servicefabric.models.BackupStorageKind :param friendly_name: Friendly name for this backup storage. :type friendly_name: str - :param managed_identity_type: Required. The type of managed identity to be used to connect to - Azure Blob Store via Managed Identity. Possible values include: "Invalid", "VMSS", "Cluster". - :type managed_identity_type: str or ~azure.servicefabric.models.ManagedIdentityType - :param blob_service_uri: Required. 
The Blob Service Uri to connect to the Azure blob store.. + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param managed_identity_type: Required. The type of managed identity to be + used to connect to Azure Blob Store via Managed Identity. Possible values + include: 'Invalid', 'VMSS', 'Cluster' + :type managed_identity_type: str or + ~azure.servicefabric.models.ManagedIdentityType + :param blob_service_uri: Required. The Blob Service Uri to connect to the + Azure blob store.. :type blob_service_uri: str - :param container_name: Required. The name of the container in the blob store to store and - enumerate backups from. + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. :type container_name: str """ @@ -14535,30 +12249,22 @@ class ManagedIdentityAzureBlobBackupStorageDescription(BackupStorageDescription) } _attribute_map = { - 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, 'managed_identity_type': {'key': 'ManagedIdentityType', 'type': 'str'}, 'blob_service_uri': {'key': 'BlobServiceUri', 'type': 'str'}, 'container_name': {'key': 'ContainerName', 'type': 'str'}, } - def __init__( - self, - *, - managed_identity_type: Union[str, "ManagedIdentityType"], - blob_service_uri: str, - container_name: str, - friendly_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, managed_identity_type, blob_service_uri: str, container_name: str, friendly_name: str=None, **kwargs) -> None: super(ManagedIdentityAzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) - self.storage_kind = 'ManagedIdentityAzureBlobStore' # type: str self.managed_identity_type = managed_identity_type self.blob_service_uri = blob_service_uri self.container_name = container_name + self.storage_kind = 'ManagedIdentityAzureBlobStore' 
-class MetricLoadDescription(msrest.serialization.Model): +class MetricLoadDescription(Model): """Specifies metric load information. :param metric_name: The name of the reported metric. @@ -14575,54 +12281,52 @@ class MetricLoadDescription(msrest.serialization.Model): 'predicted_load': {'key': 'PredictedLoad', 'type': 'long'}, } - def __init__( - self, - *, - metric_name: Optional[str] = None, - current_load: Optional[int] = None, - predicted_load: Optional[int] = None, - **kwargs - ): + def __init__(self, *, metric_name: str=None, current_load: int=None, predicted_load: int=None, **kwargs) -> None: super(MetricLoadDescription, self).__init__(**kwargs) self.metric_name = metric_name self.current_load = current_load self.predicted_load = predicted_load -class MonitoringPolicyDescription(msrest.serialization.Model): +class MonitoringPolicyDescription(Model): """Describes the parameters for monitoring an upgrade in Monitored mode. - :param failure_action: The compensating action to perform when a Monitored upgrade encounters - monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will - start rolling back automatically. - Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible - values include: "Invalid", "Rollback", "Manual". + :param failure_action: The compensating action to perform when a Monitored + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing - an upgrade domain before applying health policies. 
It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to + wait after completing an upgrade domain before applying health policies. + It is first interpreted as a string representing an ISO 8601 duration. If + that fails, then it is interpreted as a number representing the total + number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time that the application or - cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time + that the application or cluster must remain healthy before the upgrade + proceeds to the next upgrade domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health - evaluation when the application or cluster is unhealthy before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_retry_timeout_in_milliseconds: The amount of time to + retry health evaluation when the application or cluster is unhealthy + before FailureAction is executed. It is first interpreted as a string + representing an ISO 8601 duration. 
If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete - before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 - duration. If that fails, then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall + upgrade has to complete before FailureAction is executed. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, + then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to - complete before FailureAction is executed. It is first interpreted as a string representing an - ISO 8601 duration. If that fails, then it is interpreted as a number representing the total - number of milliseconds. + :param upgrade_domain_timeout_in_milliseconds: The amount of time each + upgrade domain has to complete before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of + milliseconds. 
:type upgrade_domain_timeout_in_milliseconds: str """ @@ -14635,17 +12339,7 @@ class MonitoringPolicyDescription(msrest.serialization.Model): 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } - def __init__( - self, - *, - failure_action: Optional[Union[str, "FailureAction"]] = None, - health_check_wait_duration_in_milliseconds: Optional[str] = "0", - health_check_stable_duration_in_milliseconds: Optional[str] = "PT0H2M0S", - health_check_retry_timeout_in_milliseconds: Optional[str] = "PT0H10M0S", - upgrade_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", - upgrade_domain_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", - **kwargs - ): + def __init__(self, *, failure_action=None, health_check_wait_duration_in_milliseconds: str=None, health_check_stable_duration_in_milliseconds: str=None, health_check_retry_timeout_in_milliseconds: str=None, upgrade_timeout_in_milliseconds: str=None, upgrade_domain_timeout_in_milliseconds: str=None, **kwargs) -> None: super(MonitoringPolicyDescription, self).__init__(**kwargs) self.failure_action = failure_action self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds @@ -14655,12 +12349,13 @@ def __init__( self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds -class NameDescription(msrest.serialization.Model): +class NameDescription(Model): """Describes a Service Fabric name. All required parameters must be populated in order to send to Azure. - :param name: Required. The Service Fabric name, including the 'fabric:' URI scheme. + :param name: Required. The Service Fabric name, including the 'fabric:' + URI scheme. 
:type name: str """ @@ -14672,30 +12367,25 @@ class NameDescription(msrest.serialization.Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - **kwargs - ): + def __init__(self, *, name: str, **kwargs) -> None: super(NameDescription, self).__init__(**kwargs) self.name = name class NamedPartitionInformation(PartitionInformation): - """Describes the partition information for the name as a string that is based on partition schemes. + """Describes the partition information for the name as a string that is based + on partition schemes. All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str :param name: Name of the partition. 
:type name: str """ @@ -14705,34 +12395,29 @@ class NamedPartitionInformation(PartitionInformation): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: super(NamedPartitionInformation, self).__init__(id=id, **kwargs) - self.service_partition_kind = 'Named' # type: str self.name = name + self.service_partition_kind = 'Named' -class PartitionSchemeDescription(msrest.serialization.Model): +class PartitionSchemeDescription(Model): """Describes how the service is partitioned. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NamedPartitionSchemeDescription, SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription. + sub-classes are: NamedPartitionSchemeDescription, + SingletonPartitionSchemeDescription, + UniformInt64RangePartitionSchemeDescription All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. 
+ :type partition_scheme: str """ _validation = { @@ -14747,12 +12432,9 @@ class PartitionSchemeDescription(msrest.serialization.Model): 'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(PartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = None # type: Optional[str] + self.partition_scheme = None class NamedPartitionSchemeDescription(PartitionSchemeDescription): @@ -14760,13 +12442,12 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. + :type partition_scheme: str :param count: Required. The number of partitions. :type count: int - :param names: Required. Array of size specified by the ‘Count’ parameter, for the names of the - partitions. + :param names: Required. Array of size specified by the ‘Count’ parameter, + for the names of the partitions. 
:type names: list[str] """ @@ -14782,25 +12463,20 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): 'names': {'key': 'Names', 'type': '[str]'}, } - def __init__( - self, - *, - count: int, - names: List[str], - **kwargs - ): + def __init__(self, *, count: int, names, **kwargs) -> None: super(NamedPartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'Named' # type: str self.count = count self.names = names + self.partition_scheme = 'Named' -class NetworkRef(msrest.serialization.Model): +class NetworkRef(Model): """Describes a network reference in a service. - :param name: Name of the network. + :param name: Name of the network :type name: str - :param endpoint_refs: A list of endpoints that are exposed on this network. + :param endpoint_refs: A list of endpoints that are exposed on this + network. :type endpoint_refs: list[~azure.servicefabric.models.EndpointRef] """ @@ -14809,19 +12485,13 @@ class NetworkRef(msrest.serialization.Model): 'endpoint_refs': {'key': 'endpointRefs', 'type': '[EndpointRef]'}, } - def __init__( - self, - *, - name: Optional[str] = None, - endpoint_refs: Optional[List["EndpointRef"]] = None, - **kwargs - ): + def __init__(self, *, name: str=None, endpoint_refs=None, **kwargs) -> None: super(NetworkRef, self).__init__(**kwargs) self.name = name self.endpoint_refs = endpoint_refs -class NetworkResourceDescription(msrest.serialization.Model): +class NetworkResourceDescription(Model): """This type describes a network resource. All required parameters must be populated in order to send to Azure. 
@@ -14842,13 +12512,7 @@ class NetworkResourceDescription(msrest.serialization.Model): 'properties': {'key': 'properties', 'type': 'NetworkResourceProperties'}, } - def __init__( - self, - *, - name: str, - properties: "NetworkResourceProperties", - **kwargs - ): + def __init__(self, *, name: str, properties, **kwargs) -> None: super(NetworkResourceDescription, self).__init__(**kwargs) self.name = name self.properties = properties @@ -14859,38 +12523,18 @@ class NodeAbortedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -14912,9 +12556,9 @@ class NodeAbortedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -14927,11 +12571,11 @@ class NodeAbortedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -14943,26 +12587,8 @@ class NodeAbortedEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - node_id: str, - upgrade_domain: str, - fault_domain: str, - ip_address_or_fqdn: str, - hostname: str, - is_seed_node: bool, - node_version: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeAbortedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeAborted' # type: str self.node_instance = node_instance self.node_id = node_id self.upgrade_domain = upgrade_domain @@ 
-14971,6 +12597,7 @@ def __init__( self.hostname = hostname self.is_seed_node = is_seed_node self.node_version = node_version + self.kind = 'NodeAborted' class NodeAddedToClusterEvent(NodeEvent): @@ -14978,38 +12605,18 @@ class NodeAddedToClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - 
"ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. 
@@ -15027,9 +12634,9 @@ class NodeAddedToClusterEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -15040,11 +12647,11 @@ class NodeAddedToClusterEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -15054,30 +12661,15 @@ class NodeAddedToClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_id: str, - node_instance: int, - node_type: str, - fabric_version: str, - ip_address_or_fqdn: str, - node_capacities: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, node_type: str, fabric_version: str, ip_address_or_fqdn: str, node_capacities: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeAddedToClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeAddedToCluster' # type: str self.node_id = node_id self.node_instance = node_instance self.node_type = node_type self.fabric_version = fabric_version 
self.ip_address_or_fqdn = ip_address_or_fqdn self.node_capacities = node_capacities + self.kind = 'NodeAddedToCluster' class NodeClosedEvent(NodeEvent): @@ -15085,38 +12677,18 @@ class NodeClosedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - 
"ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. @@ -15128,9 +12700,9 @@ class NodeClosedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -15138,35 +12710,23 @@ class NodeClosedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'error': {'key': 'Error', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: 
str, - node_id: str, - node_instance: int, - error: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, error: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeClosedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeClosed' # type: str self.node_id = node_id self.node_instance = node_instance self.error = error + self.kind = 'NodeClosed' class NodeDeactivateCompletedEvent(NodeEvent): @@ -15174,38 +12734,18 @@ class NodeDeactivateCompletedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. @@ -15215,13 +12755,13 @@ class NodeDeactivateCompletedEvent(NodeEvent): :param batch_ids_with_deactivate_intent: Required. Batch Ids. :type batch_ids_with_deactivate_intent: str :param start_time: Required. Start time. 
- :type start_time: ~datetime.datetime + :type start_time: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'effective_deactivate_intent': {'required': True}, @@ -15230,11 +12770,11 @@ class NodeDeactivateCompletedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'effective_deactivate_intent': {'key': 'EffectiveDeactivateIntent', 'type': 'str'}, @@ -15242,26 +12782,13 @@ class NodeDeactivateCompletedEvent(NodeEvent): 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - effective_deactivate_intent: str, - batch_ids_with_deactivate_intent: str, - start_time: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, effective_deactivate_intent: str, batch_ids_with_deactivate_intent: str, start_time, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeDeactivateCompletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeDeactivateCompleted' # type: str self.node_instance = node_instance 
self.effective_deactivate_intent = effective_deactivate_intent self.batch_ids_with_deactivate_intent = batch_ids_with_deactivate_intent self.start_time = start_time + self.kind = 'NodeDeactivateCompleted' class NodeDeactivateStartedEvent(NodeEvent): @@ -15269,38 +12796,18 @@ class NodeDeactivateStartedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", 
"ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -15312,9 +12819,9 @@ class NodeDeactivateStartedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'batch_id': {'required': True}, @@ -15322,53 +12829,46 @@ class NodeDeactivateStartedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'batch_id': {'key': 'BatchId', 'type': 'str'}, 'deactivate_intent': {'key': 'DeactivateIntent', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - batch_id: str, - deactivate_intent: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, batch_id: str, deactivate_intent: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeDeactivateStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeDeactivateStarted' # type: str self.node_instance = node_instance self.batch_id = batch_id self.deactivate_intent = deactivate_intent + self.kind = 'NodeDeactivateStarted' -class NodeDeactivationInfo(msrest.serialization.Model): - """Information about the node deactivation. 
This information is valid for a node that is undergoing deactivation or has already been deactivated. +class NodeDeactivationInfo(Model): + """Information about the node deactivation. This information is valid for a + node that is undergoing deactivation or has already been deactivated. - :param node_deactivation_intent: The intent or the reason for deactivating the node. Following - are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", - "RemoveData", "RemoveNode". - :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent - :param node_deactivation_status: The status of node deactivation operation. Following are the - possible values. Possible values include: "None", "SafetyCheckInProgress", - "SafetyCheckComplete", "Completed". - :type node_deactivation_status: str or ~azure.servicefabric.models.NodeDeactivationStatus - :param node_deactivation_task: List of tasks representing the deactivation operation on the - node. - :type node_deactivation_task: list[~azure.servicefabric.models.NodeDeactivationTask] - :param pending_safety_checks: List of pending safety checks. - :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] + :param node_deactivation_intent: The intent or the reason for deactivating + the node. Following are the possible values for it. Possible values + include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' + :type node_deactivation_intent: str or + ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_status: The status of node deactivation + operation. Following are the possible values. Possible values include: + 'None', 'SafetyCheckInProgress', 'SafetyCheckComplete', 'Completed' + :type node_deactivation_status: str or + ~azure.servicefabric.models.NodeDeactivationStatus + :param node_deactivation_task: List of tasks representing the deactivation + operation on the node. 
+ :type node_deactivation_task: + list[~azure.servicefabric.models.NodeDeactivationTask] + :param pending_safety_checks: List of pending safety checks + :type pending_safety_checks: + list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -15378,15 +12878,7 @@ class NodeDeactivationInfo(msrest.serialization.Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__( - self, - *, - node_deactivation_intent: Optional[Union[str, "NodeDeactivationIntent"]] = None, - node_deactivation_status: Optional[Union[str, "NodeDeactivationStatus"]] = None, - node_deactivation_task: Optional[List["NodeDeactivationTask"]] = None, - pending_safety_checks: Optional[List["SafetyCheckWrapper"]] = None, - **kwargs - ): + def __init__(self, *, node_deactivation_intent=None, node_deactivation_status=None, node_deactivation_task=None, pending_safety_checks=None, **kwargs) -> None: super(NodeDeactivationInfo, self).__init__(**kwargs) self.node_deactivation_intent = node_deactivation_intent self.node_deactivation_status = node_deactivation_status @@ -15394,16 +12886,18 @@ def __init__( self.pending_safety_checks = pending_safety_checks -class NodeDeactivationTask(msrest.serialization.Model): +class NodeDeactivationTask(Model): """The task representing the deactivation operation on the node. - :param node_deactivation_task_id: Identity of the task related to deactivation operation on the - node. - :type node_deactivation_task_id: ~azure.servicefabric.models.NodeDeactivationTaskId - :param node_deactivation_intent: The intent or the reason for deactivating the node. Following - are the possible values for it. Possible values include: "Invalid", "Pause", "Restart", - "RemoveData", "RemoveNode". - :type node_deactivation_intent: str or ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_task_id: Identity of the task related to + deactivation operation on the node. 
+ :type node_deactivation_task_id: + ~azure.servicefabric.models.NodeDeactivationTaskId + :param node_deactivation_intent: The intent or the reason for deactivating + the node. Following are the possible values for it. Possible values + include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' + :type node_deactivation_intent: str or + ~azure.servicefabric.models.NodeDeactivationIntent """ _attribute_map = { @@ -15411,27 +12905,22 @@ class NodeDeactivationTask(msrest.serialization.Model): 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, } - def __init__( - self, - *, - node_deactivation_task_id: Optional["NodeDeactivationTaskId"] = None, - node_deactivation_intent: Optional[Union[str, "NodeDeactivationIntent"]] = None, - **kwargs - ): + def __init__(self, *, node_deactivation_task_id=None, node_deactivation_intent=None, **kwargs) -> None: super(NodeDeactivationTask, self).__init__(**kwargs) self.node_deactivation_task_id = node_deactivation_task_id self.node_deactivation_intent = node_deactivation_intent -class NodeDeactivationTaskId(msrest.serialization.Model): +class NodeDeactivationTaskId(Model): """Identity of the task related to deactivation operation on the node. :param id: Value of the task id. :type id: str - :param node_deactivation_task_type: The type of the task that performed the node deactivation. - Following are the possible values. Possible values include: "Invalid", "Infrastructure", - "Repair", "Client". - :type node_deactivation_task_type: str or ~azure.servicefabric.models.NodeDeactivationTaskType + :param node_deactivation_task_type: The type of the task that performed + the node deactivation. Following are the possible values. 
Possible values + include: 'Invalid', 'Infrastructure', 'Repair', 'Client' + :type node_deactivation_task_type: str or + ~azure.servicefabric.models.NodeDeactivationTaskType """ _attribute_map = { @@ -15439,13 +12928,7 @@ class NodeDeactivationTaskId(msrest.serialization.Model): 'node_deactivation_task_type': {'key': 'NodeDeactivationTaskType', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - node_deactivation_task_type: Optional[Union[str, "NodeDeactivationTaskType"]] = None, - **kwargs - ): + def __init__(self, *, id: str=None, node_deactivation_task_type=None, **kwargs) -> None: super(NodeDeactivationTaskId, self).__init__(**kwargs) self.id = id self.node_deactivation_task_type = node_deactivation_task_type @@ -15456,103 +12939,75 @@ class NodeDownEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_up_at: Required. Time when Node was last up. 
- :type last_node_up_at: ~datetime.datetime + :type last_node_up_at: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_up_at': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_up_at': {'key': 'LastNodeUpAt', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - last_node_up_at: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, last_node_up_at, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeDownEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeDown' # type: str self.node_instance = node_instance self.last_node_up_at = last_node_up_at + self.kind = 'NodeDown' class NodeHealth(EntityHealth): """Information about the health of a Service Fabric node. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). 
- The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: Name of the node whose health information is described by this object. + :param name: Name of the node whose health information is described by + this object. 
:type name: str """ @@ -15564,47 +13019,37 @@ class NodeHealth(EntityHealth): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, **kwargs) -> None: super(NodeHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name class NodeHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a node, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a node, containing information about the + data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the node. The types of the unhealthy evaluations can be EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the node. The types of the + unhealthy evaluations can be EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -15612,26 +13057,18 @@ class NodeHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - node_name: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(NodeHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Node' # type: str self.node_name = node_name self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Node' class NodeHealthReportExpiredEvent(NodeEvent): @@ -15639,38 +13076,18 @@ class NodeHealthReportExpiredEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -15687,16 +13104,17 @@ class NodeHealthReportExpiredEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -15710,11 +13128,11 @@ class NodeHealthReportExpiredEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -15727,27 +13145,8 @@ class NodeHealthReportExpiredEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, 
has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeHealthReportExpired' # type: str self.node_instance_id = node_instance_id self.source_id = source_id self.property = property @@ -15757,19 +13156,23 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'NodeHealthReportExpired' class NodeHealthState(EntityHealthState): - """Represents the health state of a node, which contains the node identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + """Represents the health state of a node, which contains the node identifier + and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param name: The name of a Service Fabric node. :type name: str - :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is - deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a + node. Node Id is deterministically generated from node name. 
:type id: ~azure.servicefabric.models.NodeId """ @@ -15779,25 +13182,19 @@ class NodeHealthState(EntityHealthState): 'id': {'key': 'Id', 'type': 'NodeId'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - name: Optional[str] = None, - id: Optional["NodeId"] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, name: str=None, id=None, **kwargs) -> None: super(NodeHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.name = name self.id = id class NodeHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a node, which contains the node name and its aggregated health state. + """Represents the health state chunk of a node, which contains the node name + and its aggregated health state. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. 
:type node_name: str @@ -15808,25 +13205,21 @@ class NodeHealthStateChunk(EntityHealthStateChunk): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, health_state=None, node_name: str=None, **kwargs) -> None: super(NodeHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.node_name = node_name class NodeHealthStateChunkList(EntityHealthStateChunkList): - """The list of node health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. + """The list of node health state chunks in the cluster that respect the input + filters in the chunk query. Returned by get cluster health state chunks + query. - :param total_count: Total number of entity health state objects that match the specified - filters from the cluster health chunk query description. + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. :type total_count: long - :param items: The list of node health state chunks that respect the input filters in the chunk - query. + :param items: The list of node health state chunks that respect the input + filters in the chunk query. 
:type items: list[~azure.servicefabric.models.NodeHealthStateChunk] """ @@ -15835,51 +13228,51 @@ class NodeHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[NodeHealthStateChunk]'}, } - def __init__( - self, - *, - total_count: Optional[int] = None, - items: Optional[List["NodeHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, total_count: int=None, items=None, **kwargs) -> None: super(NodeHealthStateChunkList, self).__init__(total_count=total_count, **kwargs) self.items = items -class NodeHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a node should be included in the returned cluster health chunk. -One filter can match zero, one or multiple nodes, depending on its properties. -Can be specified in the cluster health chunk query description. - - :param node_name_filter: Name of the node that matches the filter. The filter is applied only - to the specified node, if it exists. - If the node doesn't exist, no node is returned in the cluster health chunk based on this - filter. - If the node exists, it is included in the cluster health chunk if the health state matches the - other filter properties. - If not specified, all nodes that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class NodeHealthStateFilter(Model): + """Defines matching criteria to determine whether a node should be included in + the returned cluster health chunk. + One filter can match zero, one or multiple nodes, depending on its + properties. + Can be specified in the cluster health chunk query description. + + :param node_name_filter: Name of the node that matches the filter. The + filter is applied only to the specified node, if it exists. + If the node doesn't exist, no node is returned in the cluster health chunk + based on this filter. 
+ If the node exists, it is included in the cluster health chunk if the + health state matches the other filter properties. + If not specified, all nodes that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. :type node_name_filter: str - :param health_state_filter: The filter for the health state of the nodes. It allows selecting - nodes if they match the desired health states. - The possible values are integer value of one of the following health states. Only nodes that - match the filter are returned. All nodes are used to evaluate the cluster aggregated health - state. - If not specified, default value is None, unless the node name is specified. If the filter has - default value and node name is specified, the matching node is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches nodes with HealthState value of OK (2) and - Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the nodes. + It allows selecting nodes if they match the desired health states. + The possible values are integer value of one of the following health + states. Only nodes that match the filter are returned. All nodes are used + to evaluate the cluster aggregated health state. 
+ If not specified, default value is None, unless the node name is + specified. If the filter has default value and node name is specified, the + matching node is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches nodes with HealthState + value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int """ @@ -15888,20 +13281,15 @@ class NodeHealthStateFilter(msrest.serialization.Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__( - self, - *, - node_name_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - **kwargs - ): + def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, **kwargs) -> None: super(NodeHealthStateFilter, self).__init__(**kwargs) self.node_name_filter = node_name_filter self.health_state_filter = health_state_filter -class NodeId(msrest.serialization.Model): - """An internal ID used by Service Fabric to uniquely identify a node. Node Id is deterministically generated from node name. +class NodeId(Model): + """An internal ID used by Service Fabric to uniquely identify a node. Node Id + is deterministically generated from node name. :param id: Value of the node Id. This is a 128 bit integer. 
:type id: str @@ -15911,27 +13299,22 @@ class NodeId(msrest.serialization.Model): 'id': {'key': 'Id', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, **kwargs) -> None: super(NodeId, self).__init__(**kwargs) self.id = id -class NodeImpact(msrest.serialization.Model): +class NodeImpact(Model): """Describes the expected impact of a repair to a particular node. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the impacted node. :type node_name: str - :param impact_level: The level of impact expected. Possible values include: "Invalid", "None", - "Restart", "RemoveData", "RemoveNode". + :param impact_level: The level of impact expected. Possible values + include: 'Invalid', 'None', 'Restart', 'RemoveData', 'RemoveNode' :type impact_level: str or ~azure.servicefabric.models.ImpactLevel """ @@ -15944,72 +13327,73 @@ class NodeImpact(msrest.serialization.Model): 'impact_level': {'key': 'ImpactLevel', 'type': 'str'}, } - def __init__( - self, - *, - node_name: str, - impact_level: Optional[Union[str, "ImpactLevel"]] = None, - **kwargs - ): + def __init__(self, *, node_name: str, impact_level=None, **kwargs) -> None: super(NodeImpact, self).__init__(**kwargs) self.node_name = node_name self.impact_level = impact_level -class NodeInfo(msrest.serialization.Model): +class NodeInfo(Model): """Information about a node in Service Fabric cluster. :param name: The name of a Service Fabric node. :type name: str - :param ip_address_or_fqdn: The IP address or fully qualified domain name of the node. + :param ip_address_or_fqdn: The IP address or fully qualified domain name + of the node. 
:type ip_address_or_fqdn: str :param type: The type of the node. :type type: str - :param code_version: The version of Service Fabric binaries that the node is running. + :param code_version: The version of Service Fabric binaries that the node + is running. :type code_version: str - :param config_version: The version of Service Fabric cluster manifest that the node is using. + :param config_version: The version of Service Fabric cluster manifest that + the node is using. :type config_version: str - :param node_status: The status of the node. Possible values include: "Invalid", "Up", "Down", - "Enabling", "Disabling", "Disabled", "Unknown", "Removed". + :param node_status: The status of the node. Possible values include: + 'Invalid', 'Up', 'Down', 'Enabling', 'Disabling', 'Disabled', 'Unknown', + 'Removed' :type node_status: str or ~azure.servicefabric.models.NodeStatus - :param node_up_time_in_seconds: Time in seconds since the node has been in NodeStatus Up. Value - zero indicates that the node is not Up. + :param node_up_time_in_seconds: Time in seconds since the node has been in + NodeStatus Up. Value zero indicates that the node is not Up. :type node_up_time_in_seconds: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param is_seed_node: Indicates if the node is a seed node or not. Returns true if the node is a - seed node, otherwise false. A quorum of seed nodes are required for proper operation of Service - Fabric cluster. + :param is_seed_node: Indicates if the node is a seed node or not. 
Returns + true if the node is a seed node, otherwise false. A quorum of seed nodes + are required for proper operation of Service Fabric cluster. :type is_seed_node: bool :param upgrade_domain: The upgrade domain of the node. :type upgrade_domain: str :param fault_domain: The fault domain of the node. :type fault_domain: str - :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is - deterministically generated from node name. + :param id: An internal ID used by Service Fabric to uniquely identify a + node. Node Id is deterministically generated from node name. :type id: ~azure.servicefabric.models.NodeId - :param instance_id: The ID representing the node instance. While the ID of the node is - deterministically generated from the node name and remains same across restarts, the InstanceId - changes every time node restarts. + :param instance_id: The ID representing the node instance. While the ID of + the node is deterministically generated from the node name and remains + same across restarts, the InstanceId changes every time node restarts. :type instance_id: str - :param node_deactivation_info: Information about the node deactivation. This information is - valid for a node that is undergoing deactivation or has already been deactivated. - :type node_deactivation_info: ~azure.servicefabric.models.NodeDeactivationInfo - :param is_stopped: Indicates if the node is stopped by calling stop node API or not. Returns - true if the node is stopped, otherwise false. + :param node_deactivation_info: Information about the node deactivation. + This information is valid for a node that is undergoing deactivation or + has already been deactivated. + :type node_deactivation_info: + ~azure.servicefabric.models.NodeDeactivationInfo + :param is_stopped: Indicates if the node is stopped by calling stop node + API or not. Returns true if the node is stopped, otherwise false. 
:type is_stopped: bool - :param node_down_time_in_seconds: Time in seconds since the node has been in NodeStatus Down. - Value zero indicates node is not NodeStatus Down. + :param node_down_time_in_seconds: Time in seconds since the node has been + in NodeStatus Down. Value zero indicates node is not NodeStatus Down. :type node_down_time_in_seconds: str - :param node_up_at: Date time in UTC when the node came up. If the node has never been up then - this value will be zero date time. - :type node_up_at: ~datetime.datetime - :param node_down_at: Date time in UTC when the node went down. If node has never been down then - this value will be zero date time. - :type node_down_at: ~datetime.datetime - :param node_tags: List that contains tags, which will be applied to the nodes. + :param node_up_at: Date time in UTC when the node came up. If the node has + never been up then this value will be zero date time. + :type node_up_at: datetime + :param node_down_at: Date time in UTC when the node went down. If node has + never been down then this value will be zero date time. + :type node_down_at: datetime + :param node_tags: List that contains tags, which will be applied to the + nodes. 
:type node_tags: list[str] """ @@ -16035,30 +13419,7 @@ class NodeInfo(msrest.serialization.Model): 'node_tags': {'key': 'NodeTags', 'type': '[str]'}, } - def __init__( - self, - *, - name: Optional[str] = None, - ip_address_or_fqdn: Optional[str] = None, - type: Optional[str] = None, - code_version: Optional[str] = None, - config_version: Optional[str] = None, - node_status: Optional[Union[str, "NodeStatus"]] = None, - node_up_time_in_seconds: Optional[str] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - is_seed_node: Optional[bool] = None, - upgrade_domain: Optional[str] = None, - fault_domain: Optional[str] = None, - id: Optional["NodeId"] = None, - instance_id: Optional[str] = None, - node_deactivation_info: Optional["NodeDeactivationInfo"] = None, - is_stopped: Optional[bool] = None, - node_down_time_in_seconds: Optional[str] = None, - node_up_at: Optional[datetime.datetime] = None, - node_down_at: Optional[datetime.datetime] = None, - node_tags: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, name: str=None, ip_address_or_fqdn: str=None, type: str=None, code_version: str=None, config_version: str=None, node_status=None, node_up_time_in_seconds: str=None, health_state=None, is_seed_node: bool=None, upgrade_domain: str=None, fault_domain: str=None, id=None, instance_id: str=None, node_deactivation_info=None, is_stopped: bool=None, node_down_time_in_seconds: str=None, node_up_at=None, node_down_at=None, node_tags=None, **kwargs) -> None: super(NodeInfo, self).__init__(**kwargs) self.name = name self.ip_address_or_fqdn = ip_address_or_fqdn @@ -16081,14 +13442,17 @@ def __init__( self.node_tags = node_tags -class NodeLoadInfo(msrest.serialization.Model): - """Information about load on a Service Fabric node. It holds a summary of all metrics and their load on a node. +class NodeLoadInfo(Model): + """Information about load on a Service Fabric node. It holds a summary of all + metrics and their load on a node. 
- :param node_name: Name of the node for which the load information is provided by this object. + :param node_name: Name of the node for which the load information is + provided by this object. :type node_name: str - :param node_load_metric_information: List that contains metrics and their load information on - this node. - :type node_load_metric_information: list[~azure.servicefabric.models.NodeLoadMetricInformation] + :param node_load_metric_information: List that contains metrics and their + load information on this node. + :type node_load_metric_information: + list[~azure.servicefabric.models.NodeLoadMetricInformation] """ _attribute_map = { @@ -16096,52 +13460,52 @@ class NodeLoadInfo(msrest.serialization.Model): 'node_load_metric_information': {'key': 'NodeLoadMetricInformation', 'type': '[NodeLoadMetricInformation]'}, } - def __init__( - self, - *, - node_name: Optional[str] = None, - node_load_metric_information: Optional[List["NodeLoadMetricInformation"]] = None, - **kwargs - ): + def __init__(self, *, node_name: str=None, node_load_metric_information=None, **kwargs) -> None: super(NodeLoadInfo, self).__init__(**kwargs) self.node_name = node_name self.node_load_metric_information = node_load_metric_information -class NodeLoadMetricInformation(msrest.serialization.Model): - """Represents data structure that contains load information for a certain metric on a node. +class NodeLoadMetricInformation(Model): + """Represents data structure that contains load information for a certain + metric on a node. - :param name: Name of the metric for which this load information is provided. + :param name: Name of the metric for which this load information is + provided. :type name: str :param node_capacity: Total capacity on the node for this metric. :type node_capacity: str - :param node_load: Current load on the node for this metric. In future releases of Service - Fabric this parameter will be deprecated in favor of CurrentNodeLoad. 
+ :param node_load: Current load on the node for this metric. In future + releases of Service Fabric this parameter will be deprecated in favor of + CurrentNodeLoad. :type node_load: str - :param node_remaining_capacity: The remaining capacity on the node for this metric. In future - releases of Service Fabric this parameter will be deprecated in favor of NodeCapacityRemaining. + :param node_remaining_capacity: The remaining capacity on the node for + this metric. In future releases of Service Fabric this parameter will be + deprecated in favor of NodeCapacityRemaining. :type node_remaining_capacity: str - :param is_capacity_violation: Indicates if there is a capacity violation for this metric on the - node. + :param is_capacity_violation: Indicates if there is a capacity violation + for this metric on the node. :type is_capacity_violation: bool - :param node_buffered_capacity: The value that indicates the reserved capacity for this metric - on the node. + :param node_buffered_capacity: The value that indicates the reserved + capacity for this metric on the node. :type node_buffered_capacity: str - :param node_remaining_buffered_capacity: The remaining reserved capacity for this metric on the - node. In future releases of Service Fabric this parameter will be deprecated in favor of - BufferedNodeCapacityRemaining. + :param node_remaining_buffered_capacity: The remaining reserved capacity + for this metric on the node. In future releases of Service Fabric this + parameter will be deprecated in favor of BufferedNodeCapacityRemaining. :type node_remaining_buffered_capacity: str :param current_node_load: Current load on the node for this metric. :type current_node_load: str - :param node_capacity_remaining: The remaining capacity on the node for the metric. + :param node_capacity_remaining: The remaining capacity on the node for the + metric. 
:type node_capacity_remaining: str - :param buffered_node_capacity_remaining: The remaining capacity which is not reserved by - NodeBufferPercentage for this metric on the node. + :param buffered_node_capacity_remaining: The remaining capacity which is + not reserved by NodeBufferPercentage for this metric on the node. :type buffered_node_capacity_remaining: str - :param planned_node_load_removal: This value represents the load of the replicas that are - planned to be removed in the future. - This kind of load is reported for replicas that are currently being moving to other nodes and - for replicas that are currently being dropped but still use the load on the source node. + :param planned_node_load_removal: This value represents the load of the + replicas that are planned to be removed in the future. + This kind of load is reported for replicas that are currently being moving + to other nodes and for replicas that are currently being dropped but still + use the load on the source node. 
:type planned_node_load_removal: str """ @@ -16159,22 +13523,7 @@ class NodeLoadMetricInformation(msrest.serialization.Model): 'planned_node_load_removal': {'key': 'PlannedNodeLoadRemoval', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - node_capacity: Optional[str] = None, - node_load: Optional[str] = None, - node_remaining_capacity: Optional[str] = None, - is_capacity_violation: Optional[bool] = None, - node_buffered_capacity: Optional[str] = None, - node_remaining_buffered_capacity: Optional[str] = None, - current_node_load: Optional[str] = None, - node_capacity_remaining: Optional[str] = None, - buffered_node_capacity_remaining: Optional[str] = None, - planned_node_load_removal: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, node_capacity: str=None, node_load: str=None, node_remaining_capacity: str=None, is_capacity_violation: bool=None, node_buffered_capacity: str=None, node_remaining_buffered_capacity: str=None, current_node_load: str=None, node_capacity_remaining: str=None, buffered_node_capacity_remaining: str=None, planned_node_load_removal: str=None, **kwargs) -> None: super(NodeLoadMetricInformation, self).__init__(**kwargs) self.name = name self.node_capacity = node_capacity @@ -16194,38 +13543,18 @@ class NodeNewHealthReportEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance_id: Required. Id of Node instance. @@ -16242,16 +13571,17 @@ class NodeNewHealthReportEvent(NodeEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, 'source_id': {'required': True}, @@ -16265,11 +13595,11 @@ class NodeNewHealthReportEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -16282,27 +13612,8 @@ class NodeNewHealthReportEvent(NodeEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, 
node_name=node_name, **kwargs) - self.kind = 'NodeNewHealthReport' # type: str self.node_instance_id = node_instance_id self.source_id = source_id self.property = property @@ -16312,6 +13623,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'NodeNewHealthReport' class NodeOpenFailedEvent(NodeEvent): @@ -16319,38 +13631,18 @@ class NodeOpenFailedEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", 
"ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -16374,9 +13666,9 @@ class NodeOpenFailedEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -16390,11 +13682,11 @@ class NodeOpenFailedEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -16407,27 +13699,8 @@ class NodeOpenFailedEvent(NodeEvent): 'error': {'key': 'Error', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - node_id: str, - upgrade_domain: str, - fault_domain: str, - ip_address_or_fqdn: str, - hostname: str, - is_seed_node: bool, - node_version: str, - error: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, error: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeOpenFailedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeOpenFailed' # type: str self.node_instance = node_instance self.node_id = node_id 
self.upgrade_domain = upgrade_domain @@ -16437,6 +13710,7 @@ def __init__( self.is_seed_node = is_seed_node self.node_version = node_version self.error = error + self.kind = 'NodeOpenFailed' class NodeOpenSucceededEvent(NodeEvent): @@ -16444,38 +13718,18 @@ class NodeOpenSucceededEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", 
"ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. 
@@ -16497,9 +13751,9 @@ class NodeOpenSucceededEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'node_id': {'required': True}, @@ -16512,11 +13766,11 @@ class NodeOpenSucceededEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, @@ -16528,26 +13782,8 @@ class NodeOpenSucceededEvent(NodeEvent): 'node_version': {'key': 'NodeVersion', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - node_id: str, - upgrade_domain: str, - fault_domain: str, - ip_address_or_fqdn: str, - hostname: str, - is_seed_node: bool, - node_version: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeOpenSucceededEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeOpenSucceeded' # type: str self.node_instance = node_instance self.node_id = node_id 
self.upgrade_domain = upgrade_domain @@ -16556,6 +13792,7 @@ def __init__( self.hostname = hostname self.is_seed_node = is_seed_node self.node_version = node_version + self.kind = 'NodeOpenSucceeded' class NodeRemovedFromClusterEvent(NodeEvent): @@ -16563,38 +13800,18 @@ class NodeRemovedFromClusterEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", 
"ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_id: Required. Id of Node. 
@@ -16612,9 +13829,9 @@ class NodeRemovedFromClusterEvent(NodeEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_id': {'required': True}, 'node_instance': {'required': True}, @@ -16625,11 +13842,11 @@ class NodeRemovedFromClusterEvent(NodeEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_id': {'key': 'NodeId', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, @@ -16639,45 +13856,29 @@ class NodeRemovedFromClusterEvent(NodeEvent): 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_id: str, - node_instance: int, - node_type: str, - fabric_version: str, - ip_address_or_fqdn: str, - node_capacities: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, node_type: str, fabric_version: str, ip_address_or_fqdn: str, node_capacities: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeRemovedFromClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeRemovedFromCluster' # type: str self.node_id = node_id self.node_instance = node_instance self.node_type = node_type self.fabric_version = 
fabric_version self.ip_address_or_fqdn = ip_address_or_fqdn self.node_capacities = node_capacities + self.kind = 'NodeRemovedFromCluster' -class RepairImpactDescriptionBase(msrest.serialization.Model): +class RepairImpactDescriptionBase(Model): """Describes the expected impact of executing a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairImpactDescription. + sub-classes are: NodeRepairImpactDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair impact represented by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairImpactKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -16692,26 +13893,22 @@ class RepairImpactDescriptionBase(msrest.serialization.Model): 'kind': {'Node': 'NodeRepairImpactDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(RepairImpactDescriptionBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class NodeRepairImpactDescription(RepairImpactDescriptionBase): """Describes the expected impact of a repair on a set of nodes. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair impact represented by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". 
- :type kind: str or ~azure.servicefabric.models.RepairImpactKind - :param node_impact_list: The list of nodes impacted by a repair action and their respective - expected impact. + :param kind: Required. Constant filled by server. + :type kind: str + :param node_impact_list: The list of nodes impacted by a repair action and + their respective expected impact. :type node_impact_list: list[~azure.servicefabric.models.NodeImpact] """ @@ -16724,30 +13921,24 @@ class NodeRepairImpactDescription(RepairImpactDescriptionBase): 'node_impact_list': {'key': 'NodeImpactList', 'type': '[NodeImpact]'}, } - def __init__( - self, - *, - node_impact_list: Optional[List["NodeImpact"]] = None, - **kwargs - ): + def __init__(self, *, node_impact_list=None, **kwargs) -> None: super(NodeRepairImpactDescription, self).__init__(**kwargs) - self.kind = 'Node' # type: str self.node_impact_list = node_impact_list + self.kind = 'Node' -class RepairTargetDescriptionBase(msrest.serialization.Model): +class RepairTargetDescriptionBase(Model): """Describes the entities targeted by a repair action. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: NodeRepairTargetDescription. + sub-classes are: NodeRepairTargetDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair target described by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairTargetKind + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -16762,24 +13953,20 @@ class RepairTargetDescriptionBase(msrest.serialization.Model): 'kind': {'Node': 'NodeRepairTargetDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(RepairTargetDescriptionBase, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class NodeRepairTargetDescription(RepairTargetDescriptionBase): """Describes the list of nodes targeted by a repair action. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of repair target described by the current object.Constant - filled by server. Possible values include: "Invalid", "Node". - :type kind: str or ~azure.servicefabric.models.RepairTargetKind + :param kind: Required. Constant filled by server. + :type kind: str :param node_names: The list of nodes targeted by a repair action. :type node_names: list[str] """ @@ -16793,19 +13980,15 @@ class NodeRepairTargetDescription(RepairTargetDescriptionBase): 'node_names': {'key': 'NodeNames', 'type': '[str]'}, } - def __init__( - self, - *, - node_names: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, node_names=None, **kwargs) -> None: super(NodeRepairTargetDescription, self).__init__(**kwargs) - self.kind = 'Node' # type: str self.node_names = node_names + self.kind = 'Node' -class NodeResult(msrest.serialization.Model): - """Contains information about a node that was targeted by a user-induced operation. +class NodeResult(Model): + """Contains information about a node that was targeted by a user-induced + operation. :param node_name: The name of a Service Fabric node. 
:type node_name: str @@ -16818,48 +14001,41 @@ class NodeResult(msrest.serialization.Model): 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, } - def __init__( - self, - *, - node_name: Optional[str] = None, - node_instance_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, node_name: str=None, node_instance_id: str=None, **kwargs) -> None: super(NodeResult, self).__init__(**kwargs) self.node_name = node_name self.node_instance_id = node_instance_id class NodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for nodes, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for nodes, containing health evaluations for + each unhealthy node that impacted current aggregated health state. Can be + returned when evaluating cluster health and the aggregated health state is + either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the - ClusterHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes from the ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes found in the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -16867,40 +14043,31 @@ class NodesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - max_percent_unhealthy_nodes: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(NodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Nodes' # type: str self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Nodes' -class NodeTagsDescription(msrest.serialization.Model): +class NodeTagsDescription(Model): """Describes the tags required for placement or running of the service. All required parameters must be populated in order to send to Azure. :param count: Required. The number of tags. :type count: int - :param tags: Required. A set of tags. Array of size specified by the ‘Count’ parameter, for the - placement tags of the service. + :param tags: Required. 
Array of size specified by the ‘Count’ parameter, + for the placement tags of the service. :type tags: list[str] """ @@ -16914,28 +14081,26 @@ class NodeTagsDescription(msrest.serialization.Model): 'tags': {'key': 'Tags', 'type': '[str]'}, } - def __init__( - self, - *, - count: int, - tags: List[str], - **kwargs - ): + def __init__(self, *, count: int, tags, **kwargs) -> None: super(NodeTagsDescription, self).__init__(**kwargs) self.count = count self.tags = tags -class NodeTransitionProgress(msrest.serialization.Model): - """Information about an NodeTransition operation. This class contains an OperationState and a NodeTransitionResult. The NodeTransitionResult is not valid until OperationState -is Completed or Faulted. +class NodeTransitionProgress(Model): + """Information about an NodeTransition operation. This class contains an + OperationState and a NodeTransitionResult. The NodeTransitionResult is not + valid until OperationState + is Completed or Faulted. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param node_transition_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type node_transition_result: ~azure.servicefabric.models.NodeTransitionResult + :param node_transition_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type node_transition_result: + ~azure.servicefabric.models.NodeTransitionResult """ _attribute_map = { @@ -16943,26 +14108,21 @@ class NodeTransitionProgress(msrest.serialization.Model): 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, } - def __init__( - self, - *, - state: Optional[Union[str, "OperationState"]] = None, - node_transition_result: Optional["NodeTransitionResult"] = None, - **kwargs - ): + def __init__(self, *, state=None, node_transition_result=None, **kwargs) -> None: super(NodeTransitionProgress, self).__init__(**kwargs) self.state = state self.node_transition_result = node_transition_result -class NodeTransitionResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class NodeTransitionResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param node_result: Contains information about a node that was targeted by a user-induced - operation. + :param node_result: Contains information about a node that was targeted by + a user-induced operation. 
:type node_result: ~azure.servicefabric.models.NodeResult """ @@ -16971,33 +14131,29 @@ class NodeTransitionResult(msrest.serialization.Model): 'node_result': {'key': 'NodeResult', 'type': 'NodeResult'}, } - def __init__( - self, - *, - error_code: Optional[int] = None, - node_result: Optional["NodeResult"] = None, - **kwargs - ): + def __init__(self, *, error_code: int=None, node_result=None, **kwargs) -> None: super(NodeTransitionResult, self).__init__(**kwargs) self.error_code = error_code self.node_result = node_result -class NodeTypeHealthPolicyMapItem(msrest.serialization.Model): +class NodeTypeHealthPolicyMapItem(Model): """Defines an item in NodeTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the node type health policy map item. This is the name of the - node type. + :param key: Required. The key of the node type health policy map item. + This is the name of the node type. :type key: str :param value: Required. The value of the node type health policy map item. - If the percentage is respected but there is at least one unhealthy node in the node type, the - health is evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy nodes over the total number - of nodes in the node type. - The computation rounds up to tolerate one failure on small numbers of nodes. - The max percent unhealthy nodes allowed for the node type. Must be between zero and 100. + If the percentage is respected but there is at least one unhealthy node in + the node type, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes + over the total number of nodes in the node type. + The computation rounds up to tolerate one failure on small numbers of + nodes. + The max percent unhealthy nodes allowed for the node type. Must be between + zero and 100. 
:type value: int """ @@ -17011,51 +14167,48 @@ class NodeTypeHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__( - self, - *, - key: str, - value: int, - **kwargs - ): + def __init__(self, *, key: str, value: int, **kwargs) -> None: super(NodeTypeHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value class NodeTypeNodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for nodes of a particular node type. The node type nodes evaluation can be returned when cluster health evaluation returns unhealthy aggregated health state, either Error or Warning. It contains health evaluations for each unhealthy node of the included node type that impacted current aggregated health state. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". 
- :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for nodes of a particular node type. The node + type nodes evaluation can be returned when cluster health evaluation + returns unhealthy aggregated health state, either Error or Warning. It + contains health evaluations for each unhealthy node of the included node + type that impacted current aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param node_type_name: The node type name as defined in the cluster manifest. + :param kind: Required. Constant filled by server. + :type kind: str + :param node_type_name: The node type name as defined in the cluster + manifest. :type node_type_name: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes for the node - type, specified as an entry in NodeTypeHealthPolicyMap. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes for the node type, specified as an entry in + NodeTypeHealthPolicyMap. :type max_percent_unhealthy_nodes: int - :param total_count: Total number of nodes of the node type found in the health store. + :param total_count: Total number of nodes of the node type found in the + health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. 
Includes all the unhealthy NodeHealthEvaluation of this node type that impacted the - aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation of this node type that impacted the aggregated + health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -17063,32 +14216,22 @@ class NodeTypeNodesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_type_name': {'key': 'NodeTypeName', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - node_type_name: Optional[str] = None, - max_percent_unhealthy_nodes: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, node_type_name: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(NodeTypeNodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'NodeTypeNodes' # type: str self.node_type_name = node_type_name self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes self.total_count = 
total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'NodeTypeNodes' class NodeUpEvent(NodeEvent): @@ -17096,94 +14239,65 @@ class NodeUpEvent(NodeEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", 
"ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param node_instance: Required. Id of Node instance. :type node_instance: long :param last_node_down_at: Required. Time when Node was last down. 
- :type last_node_down_at: ~datetime.datetime + :type last_node_down_at: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'node_name': {'required': True}, 'node_instance': {'required': True}, 'last_node_down_at': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, 'last_node_down_at': {'key': 'LastNodeDownAt', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - node_name: str, - node_instance: int, - last_node_down_at: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, last_node_down_at, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(NodeUpEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) - self.kind = 'NodeUp' # type: str self.node_instance = node_instance self.last_node_down_at = last_node_down_at + self.kind = 'NodeUp' -class NodeUpgradeProgressInfo(msrest.serialization.Model): +class NodeUpgradeProgressInfo(Model): """Information about the upgrading node and its status. :param node_name: The name of a Service Fabric node. :type node_name: str - :param upgrade_phase: The state of the upgrading node. 
Possible values include: "Invalid", - "PreUpgradeSafetyCheck", "Upgrading", "PostUpgradeSafetyCheck". + :param upgrade_phase: The state of the upgrading node. Possible values + include: 'Invalid', 'PreUpgradeSafetyCheck', 'Upgrading', + 'PostUpgradeSafetyCheck' :type upgrade_phase: str or ~azure.servicefabric.models.NodeUpgradePhase - :param pending_safety_checks: List of pending safety checks. - :type pending_safety_checks: list[~azure.servicefabric.models.SafetyCheckWrapper] + :param pending_safety_checks: List of pending safety checks + :type pending_safety_checks: + list[~azure.servicefabric.models.SafetyCheckWrapper] """ _attribute_map = { @@ -17192,31 +14306,27 @@ class NodeUpgradeProgressInfo(msrest.serialization.Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__( - self, - *, - node_name: Optional[str] = None, - upgrade_phase: Optional[Union[str, "NodeUpgradePhase"]] = None, - pending_safety_checks: Optional[List["SafetyCheckWrapper"]] = None, - **kwargs - ): + def __init__(self, *, node_name: str=None, upgrade_phase=None, pending_safety_checks=None, **kwargs) -> None: super(NodeUpgradeProgressInfo, self).__init__(**kwargs) self.node_name = node_name self.upgrade_phase = upgrade_phase self.pending_safety_checks = pending_safety_checks -class OperationStatus(msrest.serialization.Model): - """Contains the OperationId, OperationState, and OperationType for user-induced operations. +class OperationStatus(Model): + """Contains the OperationId, OperationState, and OperationType for + user-induced operations. - :param operation_id: A GUID that identifies a call to this API. This is also passed into the - corresponding GetProgress API. + :param operation_id: A GUID that identifies a call to this API. This is + also passed into the corresponding GetProgress API. :type operation_id: str - :param state: The state of the operation. 
Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param type: The type of the operation. Possible values include: "Invalid", - "PartitionDataLoss", "PartitionQuorumLoss", "PartitionRestart", "NodeTransition". + :param type: The type of the operation. Possible values include: + 'Invalid', 'PartitionDataLoss', 'PartitionQuorumLoss', 'PartitionRestart', + 'NodeTransition' :type type: str or ~azure.servicefabric.models.OperationType """ @@ -17226,30 +14336,25 @@ class OperationStatus(msrest.serialization.Model): 'type': {'key': 'Type', 'type': 'str'}, } - def __init__( - self, - *, - operation_id: Optional[str] = None, - state: Optional[Union[str, "OperationState"]] = None, - type: Optional[Union[str, "OperationType"]] = None, - **kwargs - ): + def __init__(self, *, operation_id: str=None, state=None, type=None, **kwargs) -> None: super(OperationStatus, self).__init__(**kwargs) self.operation_id = operation_id self.state = state self.type = type -class PackageSharingPolicyInfo(msrest.serialization.Model): +class PackageSharingPolicyInfo(Model): """Represents a policy for the package sharing. - :param shared_package_name: The name of code, configuration or data package that should be - shared. + :param shared_package_name: The name of code, configuration or data + package that should be shared. :type shared_package_name: str - :param package_sharing_scope: Represents the scope for PackageSharingPolicy. This is specified - during DeployServicePackageToNode operation. Possible values include: "None", "All", "Code", - "Config", "Data". 
- :type package_sharing_scope: str or ~azure.servicefabric.models.PackageSharingPolicyScope + :param package_sharing_scope: Represents the scope for + PackageSharingPolicy. This is specified during DeployServicePackageToNode + operation. Possible values include: 'None', 'All', 'Code', 'Config', + 'Data' + :type package_sharing_scope: str or + ~azure.servicefabric.models.PackageSharingPolicyScope """ _attribute_map = { @@ -17257,26 +14362,24 @@ class PackageSharingPolicyInfo(msrest.serialization.Model): 'package_sharing_scope': {'key': 'PackageSharingScope', 'type': 'str'}, } - def __init__( - self, - *, - shared_package_name: Optional[str] = None, - package_sharing_scope: Optional[Union[str, "PackageSharingPolicyScope"]] = None, - **kwargs - ): + def __init__(self, *, shared_package_name: str=None, package_sharing_scope=None, **kwargs) -> None: super(PackageSharingPolicyInfo, self).__init__(**kwargs) self.shared_package_name = shared_package_name self.package_sharing_scope = package_sharing_scope -class PagedApplicationInfoList(msrest.serialization.Model): - """The list of applications in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedApplicationInfoList(Model): + """The list of applications in the cluster. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. 
If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of application information. :type items: list[~azure.servicefabric.models.ApplicationInfo] @@ -17287,29 +14390,28 @@ class PagedApplicationInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ApplicationInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ApplicationInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedApplicationResourceDescriptionList(msrest.serialization.Model): - """The list of application resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedApplicationResourceDescriptionList(Model): + """The list of application resources. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. 
When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: list[~azure.servicefabric.models.ApplicationResourceDescription] + :type items: + list[~azure.servicefabric.models.ApplicationResourceDescription] """ _attribute_map = { @@ -17317,26 +14419,24 @@ class PagedApplicationResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ApplicationResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ApplicationResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedApplicationResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedApplicationTypeInfoList(msrest.serialization.Model): - """The list of application types that are provisioned or being provisioned in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedApplicationTypeInfoList(Model): + """The list of application types that are provisioned or being provisioned in + the cluster. The list is paged when all of the results cannot fit in a + single message. 
The next set of results can be obtained by executing the + same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of application type information. :type items: list[~azure.servicefabric.models.ApplicationTypeInfo] @@ -17347,26 +14447,24 @@ class PagedApplicationTypeInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ApplicationTypeInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedApplicationTypeInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupConfigurationInfoList(msrest.serialization.Model): - """The list of backup configuration information. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. 
+class PagedBackupConfigurationInfoList(Model): + """The list of backup configuration information. The list is paged when all of + the results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of backup configuration information. :type items: list[~azure.servicefabric.models.BackupConfigurationInfo] @@ -17377,26 +14475,24 @@ class PagedBackupConfigurationInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupConfigurationInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["BackupConfigurationInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedBackupConfigurationInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupEntityList(msrest.serialization.Model): - """The list of backup entities that are being periodically backed. 
The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupEntityList(Model): + """The list of backup entities that are being periodically backed. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of backup entity information. 
:type items: list[~azure.servicefabric.models.BackupEntity] @@ -17407,26 +14503,23 @@ class PagedBackupEntityList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupEntity]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["BackupEntity"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedBackupEntityList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupInfoList(msrest.serialization.Model): - """The list of backups. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupInfoList(Model): + """The list of backups. The list is paged when all of the results cannot fit + in a single message. The next set of results can be obtained by executing + the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of backup information. 
:type items: list[~azure.servicefabric.models.BackupInfo] @@ -17437,26 +14530,24 @@ class PagedBackupInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["BackupInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedBackupInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedBackupPolicyDescriptionList(msrest.serialization.Model): - """The list of backup policies configured in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedBackupPolicyDescriptionList(Model): + """The list of backup policies configured in the cluster. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. 
:type continuation_token: str :param items: The list of backup policies information. :type items: list[~azure.servicefabric.models.BackupPolicyDescription] @@ -17467,26 +14558,24 @@ class PagedBackupPolicyDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[BackupPolicyDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["BackupPolicyDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedBackupPolicyDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedComposeDeploymentStatusInfoList(msrest.serialization.Model): - """The list of compose deployments in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedComposeDeploymentStatusInfoList(Model): + """The list of compose deployments in the cluster. The list is paged when all + of the results cannot fit in a single message. The next set of results can + be obtained by executing the same query with the continuation token + provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. 
When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of compose deployment status information. :type items: list[~azure.servicefabric.models.ComposeDeploymentStatusInfo] @@ -17497,28 +14586,25 @@ class PagedComposeDeploymentStatusInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ComposeDeploymentStatusInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ComposeDeploymentStatusInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedComposeDeploymentStatusInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedDeployedApplicationInfoList(msrest.serialization.Model): - """The list of deployed applications in activating, downloading, or active states on a node. -The list is paged when all of the results cannot fit in a single message. -The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedDeployedApplicationInfoList(Model): + """The list of deployed applications in activating, downloading, or active + states on a node. + The list is paged when all of the results cannot fit in a single message. + The next set of results can be obtained by executing the same query with + the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. 
If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of deployed application information. :type items: list[~azure.servicefabric.models.DeployedApplicationInfo] @@ -17529,26 +14615,23 @@ class PagedDeployedApplicationInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[DeployedApplicationInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["DeployedApplicationInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedDeployedApplicationInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedGatewayResourceDescriptionList(msrest.serialization.Model): - """The list of gateway resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedGatewayResourceDescriptionList(Model): + """The list of gateway resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. 
The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.GatewayResourceDescription] @@ -17559,26 +14642,23 @@ class PagedGatewayResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[GatewayResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["GatewayResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedGatewayResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedNetworkResourceDescriptionList(msrest.serialization.Model): - """The list of network resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedNetworkResourceDescriptionList(Model): + """The list of network resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. 
- :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.NetworkResourceDescription] @@ -17589,26 +14669,23 @@ class PagedNetworkResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[NetworkResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["NetworkResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedNetworkResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedNodeInfoList(msrest.serialization.Model): - """The list of nodes in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedNodeInfoList(Model): + """The list of nodes in the cluster. The list is paged when all of the results + cannot fit in a single message. 
The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of node information. :type items: list[~azure.servicefabric.models.NodeInfo] @@ -17619,29 +14696,28 @@ class PagedNodeInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[NodeInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["NodeInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedNodeInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedPropertyInfoList(msrest.serialization.Model): - """The paged list of Service Fabric properties under a given name. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedPropertyInfoList(Model): + """The paged list of Service Fabric properties under a given name. 
The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any property under the given name has been modified - during the enumeration. If there was a modification, this property value is false. + :param is_consistent: Indicates whether any property under the given name + has been modified during the enumeration. If there was a modification, + this property value is false. :type is_consistent: bool :param properties: List of property information. 
:type properties: list[~azure.servicefabric.models.PropertyInfo] @@ -17653,28 +14729,25 @@ class PagedPropertyInfoList(msrest.serialization.Model): 'properties': {'key': 'Properties', 'type': '[PropertyInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - is_consistent: Optional[bool] = None, - properties: Optional[List["PropertyInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, is_consistent: bool=None, properties=None, **kwargs) -> None: super(PagedPropertyInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.is_consistent = is_consistent self.properties = properties -class PagedReplicaInfoList(msrest.serialization.Model): - """The list of replicas in the cluster for a given partition. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedReplicaInfoList(Model): + """The list of replicas in the cluster for a given partition. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. 
When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of replica information. :type items: list[~azure.servicefabric.models.ReplicaInfo] @@ -17685,26 +14758,23 @@ class PagedReplicaInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ReplicaInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ReplicaInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedReplicaInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedSecretResourceDescriptionList(msrest.serialization.Model): - """The list of secret resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedSecretResourceDescriptionList(Model): + """The list of secret resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.SecretResourceDescription] @@ -17715,29 +14785,28 @@ class PagedSecretResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[SecretResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["SecretResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedSecretResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedSecretValueResourceDescriptionList(msrest.serialization.Model): - """The list of values of a secret resource, paged if the number of results exceeds the limits of a single message. The next set of results can be obtained by executing the same query with the continuation token provided in the previous page. +class PagedSecretValueResourceDescriptionList(Model): + """The list of values of a secret resource, paged if the number of results + exceeds the limits of a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in the previous page. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. 
If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. - :type items: list[~azure.servicefabric.models.SecretValueResourceDescription] + :type items: + list[~azure.servicefabric.models.SecretValueResourceDescription] """ _attribute_map = { @@ -17745,26 +14814,24 @@ class PagedSecretValueResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[SecretValueResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["SecretValueResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedSecretValueResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServiceInfoList(msrest.serialization.Model): - """The list of services in the cluster for an application. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedServiceInfoList(Model): + """The list of services in the cluster for an application. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. 
- :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of service information. :type items: list[~azure.servicefabric.models.ServiceInfo] @@ -17775,26 +14842,24 @@ class PagedServiceInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ServiceInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedServiceInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServicePartitionInfoList(msrest.serialization.Model): - """The list of partition in the cluster for a service. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedServicePartitionInfoList(Model): + """The list of partition in the cluster for a service. The list is paged when + all of the results cannot fit in a single message. 
The next set of results + can be obtained by executing the same query with the continuation token + provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of service partition information. :type items: list[~azure.servicefabric.models.ServicePartitionInfo] @@ -17805,26 +14870,24 @@ class PagedServicePartitionInfoList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ServicePartitionInfo"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedServicePartitionInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServiceReplicaDescriptionList(msrest.serialization.Model): - """The list of service resource replicas in the cluster. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. 
+class PagedServiceReplicaDescriptionList(Model): + """The list of service resource replicas in the cluster. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of service resource replica description. :type items: list[~azure.servicefabric.models.ServiceReplicaDescription] @@ -17835,26 +14898,23 @@ class PagedServiceReplicaDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceReplicaDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ServiceReplicaDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedServiceReplicaDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedServiceResourceDescriptionList(msrest.serialization.Model): - """The list of service resources. 
The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedServiceResourceDescriptionList(Model): + """The list of service resources. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. 
:type items: list[~azure.servicefabric.models.ServiceResourceDescription] @@ -17865,29 +14925,28 @@ class PagedServiceResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["ServiceResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedServiceResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedSubNameInfoList(msrest.serialization.Model): - """A paged list of Service Fabric names. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedSubNameInfoList(Model): + """A paged list of Service Fabric names. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. 
If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str - :param is_consistent: Indicates whether any name under the given name has been modified during - the enumeration. If there was a modification, this property value is false. + :param is_consistent: Indicates whether any name under the given name has + been modified during the enumeration. If there was a modification, this + property value is false. :type is_consistent: bool :param sub_names: List of the child names. :type sub_names: list[str] @@ -17899,28 +14958,25 @@ class PagedSubNameInfoList(msrest.serialization.Model): 'sub_names': {'key': 'SubNames', 'type': '[str]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - is_consistent: Optional[bool] = None, - sub_names: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, is_consistent: bool=None, sub_names=None, **kwargs) -> None: super(PagedSubNameInfoList, self).__init__(**kwargs) self.continuation_token = continuation_token self.is_consistent = is_consistent self.sub_names = sub_names -class PagedUpdatePartitionLoadResultList(msrest.serialization.Model): - """The list of results of the call UpdatePartitionLoad. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedUpdatePartitionLoadResultList(Model): + """The list of results of the call UpdatePartitionLoad. The list is paged when + all of the results cannot fit in a single message. The next set of results + can be obtained by executing the same query with the continuation token + provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. 
The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: List of partition load update information. :type items: list[~azure.servicefabric.models.UpdatePartitionLoadResult] @@ -17931,26 +14987,23 @@ class PagedUpdatePartitionLoadResultList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[UpdatePartitionLoadResult]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["UpdatePartitionLoadResult"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedUpdatePartitionLoadResultList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items -class PagedVolumeResourceDescriptionList(msrest.serialization.Model): - """The list of volume resources. The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. +class PagedVolumeResourceDescriptionList(Model): + """The list of volume resources. The list is paged when all of the results + cannot fit in a single message. 
The next set of results can be obtained by + executing the same query with the continuation token provided in this list. - :param continuation_token: The continuation token parameter is used to obtain next set of - results. The continuation token is included in the response of the API when the results from - the system do not fit in a single response. When this value is passed to the next API call, the - API returns next set of results. If there are no further results, then the continuation token - is not included in the response. + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results, then the + continuation token is not included in the response. :type continuation_token: str :param items: One page of the list. :type items: list[~azure.servicefabric.models.VolumeResourceDescription] @@ -17961,13 +15014,7 @@ class PagedVolumeResourceDescriptionList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[VolumeResourceDescription]'}, } - def __init__( - self, - *, - continuation_token: Optional[str] = None, - items: Optional[List["VolumeResourceDescription"]] = None, - **kwargs - ): + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: super(PagedVolumeResourceDescriptionList, self).__init__(**kwargs) self.continuation_token = continuation_token self.items = items @@ -17977,65 +15024,46 @@ class PartitionAnalysisEvent(PartitionEvent): """Represents the base for all Partition Analysis Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PartitionPrimaryMoveAnalysisEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. 
The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. 
+ sub-classes are: PartitionPrimaryMoveAnalysisEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. 
:type metadata: ~azure.servicefabric.models.AnalysisEventMetadata """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, } @@ -18044,40 +15072,32 @@ class PartitionAnalysisEvent(PartitionEvent): 'kind': {'PartitionPrimaryMoveAnalysis': 'PartitionPrimaryMoveAnalysisEvent'} } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - metadata: "AnalysisEventMetadata", - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, metadata, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(PartitionAnalysisEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) - self.kind = 'PartitionAnalysisEvent' # type: str self.metadata = metadata + self.kind = 'PartitionAnalysisEvent' class PartitionBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information, for a specific partition, specifying what backup policy is being applied and suspend description, if any. + """Backup configuration information, for a specific partition, specifying what + backup policy is being applied and suspend description, if any. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". - :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. 
:type partition_id: str @@ -18088,28 +15108,19 @@ class PartitionBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - policy_name: Optional[str] = None, - policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, - suspension_info: Optional["BackupSuspensionInfo"] = None, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, service_name: str=None, partition_id: str=None, **kwargs) -> None: super(PartitionBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) - self.kind = 'Partition' # type: str self.service_name = service_name self.partition_id = partition_id + self.kind = 'Partition' class PartitionBackupEntity(BackupEntity): @@ -18117,11 +15128,10 @@ class PartitionBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param entity_kind: Required. Constant filled by server. 
+ :type entity_kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. :type service_name: str :param partition_id: The partition ID identifying the partition. :type partition_id: str @@ -18137,28 +15147,24 @@ class PartitionBackupEntity(BackupEntity): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, partition_id: str=None, **kwargs) -> None: super(PartitionBackupEntity, self).__init__(**kwargs) - self.entity_kind = 'Partition' # type: str self.service_name = service_name self.partition_id = partition_id + self.entity_kind = 'Partition' -class PartitionDataLossProgress(msrest.serialization.Model): +class PartitionDataLossProgress(Model): """Information about a partition data loss user-induced operation. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_data_loss_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type invoke_data_loss_result: ~azure.servicefabric.models.InvokeDataLossResult + :param invoke_data_loss_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type invoke_data_loss_result: + ~azure.servicefabric.models.InvokeDataLossResult """ _attribute_map = { @@ -18166,13 +15172,7 @@ class PartitionDataLossProgress(msrest.serialization.Model): 'invoke_data_loss_result': {'key': 'InvokeDataLossResult', 'type': 'InvokeDataLossResult'}, } - def __init__( - self, - *, - state: Optional[Union[str, "OperationState"]] = None, - invoke_data_loss_result: Optional["InvokeDataLossResult"] = None, - **kwargs - ): + def __init__(self, *, state=None, invoke_data_loss_result=None, **kwargs) -> None: super(PartitionDataLossProgress, self).__init__(**kwargs) self.state = state self.invoke_data_loss_result = invoke_data_loss_result @@ -18181,25 +15181,30 @@ def __init__( class PartitionHealth(EntityHealth): """Information about the health of a Service Fabric partition. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
:type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param partition_id: ID of the partition whose health information is described by this object. + :param partition_id: ID of the partition whose health information is + described by this object. :type partition_id: str - :param replica_health_states: The list of replica health states associated with the partition. - :type replica_health_states: list[~azure.servicefabric.models.ReplicaHealthState] + :param replica_health_states: The list of replica health states associated + with the partition. 
+ :type replica_health_states: + list[~azure.servicefabric.models.ReplicaHealthState] """ _attribute_map = { @@ -18211,50 +15216,40 @@ class PartitionHealth(EntityHealth): 'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - partition_id: Optional[str] = None, - replica_health_states: Optional[List["ReplicaHealthState"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, replica_health_states=None, **kwargs) -> None: super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.partition_id = partition_id self.replica_health_states = replica_health_states class PartitionHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a partition, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a partition, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param partition_id: Id of the partition whose health evaluation is described by this object. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition whose health evaluation is + described by this object. 
:type partition_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the partition. The types of the unhealthy evaluations can be - ReplicasHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the partition. The types of the + unhealthy evaluations can be ReplicasHealthEvaluation or + EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -18262,26 +15257,18 @@ class PartitionHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - partition_id: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, partition_id: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(PartitionHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Partition' # type: str self.partition_id = partition_id self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Partition' class PartitionHealthReportExpiredEvent(PartitionEvent): @@ -18289,42 +15276,23 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): All required 
parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -18338,16 +15306,17 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -18360,11 +15329,11 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -18376,26 +15345,8 @@ class PartitionHealthReportExpiredEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(PartitionHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, 
partition_id=partition_id, **kwargs) - self.kind = 'PartitionHealthReportExpired' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -18404,16 +15355,21 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'PartitionHealthReportExpired' class PartitionHealthState(EntityHealthState): - """Represents the health state of a partition, which contains the partition identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param partition_id: Id of the partition whose health state is described by this object. + """Represents the health state of a partition, which contains the partition + identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: Id of the partition whose health state is described + by this object. 
:type partition_id: str """ @@ -18422,29 +15378,27 @@ class PartitionHealthState(EntityHealthState): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, **kwargs) -> None: super(PartitionHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.partition_id = partition_id class PartitionHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a partition, which contains the partition ID, its aggregated health state and any replicas that respect the filters in the cluster health chunk query description. + """Represents the health state chunk of a partition, which contains the + partition ID, its aggregated health state and any replicas that respect the + filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param partition_id: The Id of the partition. :type partition_id: str - :param replica_health_state_chunks: The list of replica health state chunks belonging to the - partition that respect the filters in the cluster health chunk query description. 
- :type replica_health_state_chunks: ~azure.servicefabric.models.ReplicaHealthStateChunkList + :param replica_health_state_chunks: The list of replica health state + chunks belonging to the partition that respect the filters in the cluster + health chunk query description. + :type replica_health_state_chunks: + ~azure.servicefabric.models.ReplicaHealthStateChunkList """ _attribute_map = { @@ -18453,25 +15407,20 @@ class PartitionHealthStateChunk(EntityHealthStateChunk): 'replica_health_state_chunks': {'key': 'ReplicaHealthStateChunks', 'type': 'ReplicaHealthStateChunkList'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - partition_id: Optional[str] = None, - replica_health_state_chunks: Optional["ReplicaHealthStateChunkList"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, partition_id: str=None, replica_health_state_chunks=None, **kwargs) -> None: super(PartitionHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.partition_id = partition_id self.replica_health_state_chunks = replica_health_state_chunks -class PartitionHealthStateChunkList(msrest.serialization.Model): - """The list of partition health state chunks that respect the input filters in the chunk query description. -Returned by get cluster health state chunks query as part of the parent application hierarchy. +class PartitionHealthStateChunkList(Model): + """The list of partition health state chunks that respect the input filters in + the chunk query description. + Returned by get cluster health state chunks query as part of the parent + application hierarchy. - :param items: The list of partition health state chunks that respect the input filters in the - chunk query. + :param items: The list of partition health state chunks that respect the + input filters in the chunk query. 
:type items: list[~azure.servicefabric.models.PartitionHealthStateChunk] """ @@ -18479,60 +15428,68 @@ class PartitionHealthStateChunkList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[PartitionHealthStateChunk]'}, } - def __init__( - self, - *, - items: Optional[List["PartitionHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, items=None, **kwargs) -> None: super(PartitionHealthStateChunkList, self).__init__(**kwargs) self.items = items -class PartitionHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a partition should be included as a child of a service in the cluster health chunk. -The partitions are only returned if the parent entities match a filter specified in the cluster health chunk query description. The parent service and application must be included in the cluster health chunk. -One filter can match zero, one or multiple partitions, depending on its properties. - - :param partition_id_filter: ID of the partition that matches the filter. The filter is applied - only to the specified partition, if it exists. - If the partition doesn't exist, no partition is returned in the cluster health chunk based on - this filter. - If the partition exists, it is included in the cluster health chunk if it respects the other - filter properties. - If not specified, all partitions that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class PartitionHealthStateFilter(Model): + """Defines matching criteria to determine whether a partition should be + included as a child of a service in the cluster health chunk. + The partitions are only returned if the parent entities match a filter + specified in the cluster health chunk query description. The parent service + and application must be included in the cluster health chunk. 
+ One filter can match zero, one or multiple partitions, depending on its + properties. + + :param partition_id_filter: ID of the partition that matches the filter. + The filter is applied only to the specified partition, if it exists. + If the partition doesn't exist, no partition is returned in the cluster + health chunk based on this filter. + If the partition exists, it is included in the cluster health chunk if it + respects the other filter properties. + If not specified, all partitions that match the parent filters (if any) + are taken into consideration and matched against the other filter members, + like health state filter. :type partition_id_filter: str - :param health_state_filter: The filter for the health state of the partitions. It allows - selecting partitions if they match the desired health states. - The possible values are integer value of one of the following health states. Only partitions - that match the filter are returned. All partitions are used to evaluate the cluster aggregated - health state. - If not specified, default value is None, unless the partition ID is specified. If the filter - has default value and partition ID is specified, the matching partition is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches partitions with HealthState value of OK - (2) and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. 
- * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + partitions. It allows selecting partitions if they match the desired + health states. + The possible values are integer value of one of the following health + states. Only partitions that match the filter are returned. All partitions + are used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the partition ID is + specified. If the filter has default value and partition ID is specified, + the matching partition is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches partitions with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param replica_filters: Defines a list of filters that specify which replicas to be included in - the returned cluster health chunk as children of the parent partition. The replicas are - returned only if the parent partition matches a filter. - If the list is empty, no replicas are returned. All the replicas are used to evaluate the - parent partition aggregated health state, regardless of the input filters. 
+ :param replica_filters: Defines a list of filters that specify which + replicas to be included in the returned cluster health chunk as children + of the parent partition. The replicas are returned only if the parent + partition matches a filter. + If the list is empty, no replicas are returned. All the replicas are used + to evaluate the parent partition aggregated health state, regardless of + the input filters. The partition filter may specify multiple replica filters. - For example, it can specify a filter to return all replicas with health state Error and - another filter to always include a replica identified by its replica id. - :type replica_filters: list[~azure.servicefabric.models.ReplicaHealthStateFilter] + For example, it can specify a filter to return all replicas with health + state Error and another filter to always include a replica identified by + its replica id. + :type replica_filters: + list[~azure.servicefabric.models.ReplicaHealthStateFilter] """ _attribute_map = { @@ -18541,14 +15498,7 @@ class PartitionHealthStateFilter(msrest.serialization.Model): 'replica_filters': {'key': 'ReplicaFilters', 'type': '[ReplicaHealthStateFilter]'}, } - def __init__( - self, - *, - partition_id_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - replica_filters: Optional[List["ReplicaHealthStateFilter"]] = None, - **kwargs - ): + def __init__(self, *, partition_id_filter: str=None, health_state_filter: int=0, replica_filters=None, **kwargs) -> None: super(PartitionHealthStateFilter, self).__init__(**kwargs) self.partition_id_filter = partition_id_filter self.health_state_filter = health_state_filter @@ -18556,20 +15506,21 @@ def __init__( class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): - """Represents a scaling mechanism for adding or removing instances of stateless service partition. + """Represents a scaling mechanism for adding or removing instances of + stateless service partition. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. Specifies the kind of scaling mechanism.Constant filled by server. - Possible values include: "Invalid", "PartitionInstanceCount", - "AddRemoveIncrementalNamedPartition". - :type kind: str or ~azure.servicefabric.models.ScalingMechanismKind - :param min_instance_count: Required. Minimum number of instances of the partition. + :param kind: Required. Constant filled by server. + :type kind: str + :param min_instance_count: Required. Minimum number of instances of the + partition. :type min_instance_count: int - :param max_instance_count: Required. Maximum number of instances of the partition. + :param max_instance_count: Required. Maximum number of instances of the + partition. :type max_instance_count: int - :param scale_increment: Required. The number of instances to add or remove during a scaling - operation. + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. :type scale_increment: int """ @@ -18587,35 +15538,32 @@ class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, } - def __init__( - self, - *, - min_instance_count: int, - max_instance_count: int, - scale_increment: int, - **kwargs - ): + def __init__(self, *, min_instance_count: int, max_instance_count: int, scale_increment: int, **kwargs) -> None: super(PartitionInstanceCountScaleMechanism, self).__init__(**kwargs) - self.kind = 'PartitionInstanceCount' # type: str self.min_instance_count = min_instance_count self.max_instance_count = max_instance_count self.scale_increment = scale_increment + self.kind = 'PartitionInstanceCount' -class PartitionLoadInformation(msrest.serialization.Model): - """Represents load information for a partition, which contains the primary and secondary reported load metrics. 
-In case there is no load reported, PartitionLoadInformation will contain the default load for the service of the partition. -For default loads, LoadMetricReport's LastReportedUtc is set to 0. +class PartitionLoadInformation(Model): + """Represents load information for a partition, which contains the primary and + secondary reported load metrics. + In case there is no load reported, PartitionLoadInformation will contain + the default load for the service of the partition. + For default loads, LoadMetricReport's LastReportedUtc is set to 0. :param partition_id: Id of the partition. :type partition_id: str - :param primary_load_metric_reports: Array of load reports from the primary replica for this - partition. - :type primary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] - :param secondary_load_metric_reports: Array of aggregated load reports from all secondary - replicas for this partition. + :param primary_load_metric_reports: Array of load reports from the primary + replica for this partition. + :type primary_load_metric_reports: + list[~azure.servicefabric.models.LoadMetricReport] + :param secondary_load_metric_reports: Array of aggregated load reports + from all secondary replicas for this partition. Array only contains the latest reported load for each metric. 
- :type secondary_load_metric_reports: list[~azure.servicefabric.models.LoadMetricReport] + :type secondary_load_metric_reports: + list[~azure.servicefabric.models.LoadMetricReport] """ _attribute_map = { @@ -18624,34 +15572,31 @@ class PartitionLoadInformation(msrest.serialization.Model): 'secondary_load_metric_reports': {'key': 'SecondaryLoadMetricReports', 'type': '[LoadMetricReport]'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - primary_load_metric_reports: Optional[List["LoadMetricReport"]] = None, - secondary_load_metric_reports: Optional[List["LoadMetricReport"]] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, primary_load_metric_reports=None, secondary_load_metric_reports=None, **kwargs) -> None: super(PartitionLoadInformation, self).__init__(**kwargs) self.partition_id = partition_id self.primary_load_metric_reports = primary_load_metric_reports self.secondary_load_metric_reports = secondary_load_metric_reports -class PartitionMetricLoadDescription(msrest.serialization.Model): - """Represents load information for a partition, which contains the metrics load information about primary, all secondary replicas/instances or a specific secondary replica/instance located on a specific node. +class PartitionMetricLoadDescription(Model): + """Represents load information for a partition, which contains the metrics + load information about primary, all secondary replicas/instances or a + specific secondary replica/instance located on a specific node. :param partition_id: Id of the partition. :type partition_id: str - :param primary_replica_load_entries: Partition's load information for primary replica, in case - partition is from a stateful service. - :type primary_replica_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replicas_or_instances_load_entries: Partition's load information for all - secondary replicas or instances. 
+ :param primary_replica_load_entries: Partition's load information for + primary replica, in case partition is from a stateful service. + :type primary_replica_load_entries: + list[~azure.servicefabric.models.MetricLoadDescription] + :param secondary_replicas_or_instances_load_entries: Partition's load + information for all secondary replicas or instances. :type secondary_replicas_or_instances_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] - :param secondary_replica_or_instance_load_entries_per_node: Partition's load information for a - specific secondary replica or instance located on a specific node. + :param secondary_replica_or_instance_load_entries_per_node: Partition's + load information for a specific secondary replica or instance located on a + specific node. :type secondary_replica_or_instance_load_entries_per_node: list[~azure.servicefabric.models.ReplicaMetricLoadDescription] """ @@ -18663,15 +15608,7 @@ class PartitionMetricLoadDescription(msrest.serialization.Model): 'secondary_replica_or_instance_load_entries_per_node': {'key': 'SecondaryReplicaOrInstanceLoadEntriesPerNode', 'type': '[ReplicaMetricLoadDescription]'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - primary_replica_load_entries: Optional[List["MetricLoadDescription"]] = None, - secondary_replicas_or_instances_load_entries: Optional[List["MetricLoadDescription"]] = None, - secondary_replica_or_instance_load_entries_per_node: Optional[List["ReplicaMetricLoadDescription"]] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, primary_replica_load_entries=None, secondary_replicas_or_instances_load_entries=None, secondary_replica_or_instance_load_entries_per_node=None, **kwargs) -> None: super(PartitionMetricLoadDescription, self).__init__(**kwargs) self.partition_id = partition_id self.primary_replica_load_entries = primary_replica_load_entries @@ -18684,42 +15621,23 @@ class PartitionNewHealthReportEvent(PartitionEvent): All 
required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". 
- :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param source_id: Required. Id of report source. :type source_id: str @@ -18733,16 +15651,17 @@ class PartitionNewHealthReportEvent(PartitionEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'source_id': {'required': True}, 'property': {'required': True}, @@ -18755,11 +15674,11 @@ class PartitionNewHealthReportEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, 'property': {'key': 'Property', 'type': 'str'}, @@ -18771,26 +15690,8 @@ class PartitionNewHealthReportEvent(PartitionEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(PartitionNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, 
**kwargs) - self.kind = 'PartitionNewHealthReport' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -18799,6 +15700,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'PartitionNewHealthReport' class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): @@ -18806,47 +15708,28 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param metadata: Required. Metadata about an Analysis Event. 
:type metadata: ~azure.servicefabric.models.AnalysisEventMetadata :param when_move_completed: Required. Time when the move was completed. - :type when_move_completed: ~datetime.datetime + :type when_move_completed: datetime :param previous_node: Required. The name of a Service Fabric node. :type previous_node: str :param current_node: Required. The name of a Service Fabric node. @@ -18858,9 +15741,9 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'metadata': {'required': True}, 'when_move_completed': {'required': True}, @@ -18871,11 +15754,11 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, 'when_move_completed': {'key': 'WhenMoveCompleted', 'type': 'iso-8601'}, @@ -18885,40 +15768,27 @@ class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): 'relevant_traces': {'key': 'RelevantTraces', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - metadata: "AnalysisEventMetadata", - when_move_completed: datetime.datetime, - previous_node: str, - current_node: str, - move_reason: str, - relevant_traces: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, metadata, 
when_move_completed, previous_node: str, current_node: str, move_reason: str, relevant_traces: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(PartitionPrimaryMoveAnalysisEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, metadata=metadata, **kwargs) - self.kind = 'PartitionPrimaryMoveAnalysis' # type: str self.when_move_completed = when_move_completed self.previous_node = previous_node self.current_node = current_node self.move_reason = move_reason self.relevant_traces = relevant_traces + self.kind = 'PartitionPrimaryMoveAnalysis' -class PartitionQuorumLossProgress(msrest.serialization.Model): +class PartitionQuorumLossProgress(Model): """Information about a partition quorum loss user-induced operation. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param invoke_quorum_loss_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type invoke_quorum_loss_result: ~azure.servicefabric.models.InvokeQuorumLossResult + :param invoke_quorum_loss_result: Represents information about an + operation in a terminal state (Completed or Faulted). 
+ :type invoke_quorum_loss_result: + ~azure.servicefabric.models.InvokeQuorumLossResult """ _attribute_map = { @@ -18926,13 +15796,7 @@ class PartitionQuorumLossProgress(msrest.serialization.Model): 'invoke_quorum_loss_result': {'key': 'InvokeQuorumLossResult', 'type': 'InvokeQuorumLossResult'}, } - def __init__( - self, - *, - state: Optional[Union[str, "OperationState"]] = None, - invoke_quorum_loss_result: Optional["InvokeQuorumLossResult"] = None, - **kwargs - ): + def __init__(self, *, state=None, invoke_quorum_loss_result=None, **kwargs) -> None: super(PartitionQuorumLossProgress, self).__init__(**kwargs) self.state = state self.invoke_quorum_loss_result = invoke_quorum_loss_result @@ -18943,42 +15807,23 @@ class PartitionReconfiguredEvent(PartitionEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - 
"DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. 
The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str :param node_name: Required. The name of a Service Fabric node. :type node_name: str @@ -19009,9 +15854,9 @@ class PartitionReconfiguredEvent(PartitionEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'node_name': {'required': True}, 'node_instance_id': {'required': True}, @@ -19029,11 +15874,11 @@ class PartitionReconfiguredEvent(PartitionEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, @@ -19050,31 +15895,8 @@ class PartitionReconfiguredEvent(PartitionEvent): 'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - node_name: str, - node_instance_id: str, - service_type: str, - cc_epoch_data_loss_version: int, - cc_epoch_config_version: int, - reconfig_type: str, - result: str, - phase0_duration_ms: float, - phase1_duration_ms: float, - phase2_duration_ms: float, - phase3_duration_ms: float, - phase4_duration_ms: float, - total_duration_ms: float, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, node_name: str, 
node_instance_id: str, service_type: str, cc_epoch_data_loss_version: int, cc_epoch_config_version: int, reconfig_type: str, result: str, phase0_duration_ms: float, phase1_duration_ms: float, phase2_duration_ms: float, phase3_duration_ms: float, phase4_duration_ms: float, total_duration_ms: float, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(PartitionReconfiguredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) - self.kind = 'PartitionReconfigured' # type: str self.node_name = node_name self.node_instance_id = node_instance_id self.service_type = service_type @@ -19088,17 +15910,20 @@ def __init__( self.phase3_duration_ms = phase3_duration_ms self.phase4_duration_ms = phase4_duration_ms self.total_duration_ms = total_duration_ms + self.kind = 'PartitionReconfigured' -class PartitionRestartProgress(msrest.serialization.Model): +class PartitionRestartProgress(Model): """Information about a partition restart user-induced operation. - :param state: The state of the operation. Possible values include: "Invalid", "Running", - "RollingBack", "Completed", "Faulted", "Cancelled", "ForceCancelled". + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' :type state: str or ~azure.servicefabric.models.OperationState - :param restart_partition_result: Represents information about an operation in a terminal state - (Completed or Faulted). - :type restart_partition_result: ~azure.servicefabric.models.RestartPartitionResult + :param restart_partition_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type restart_partition_result: + ~azure.servicefabric.models.RestartPartitionResult """ _attribute_map = { @@ -19106,49 +15931,43 @@ class PartitionRestartProgress(msrest.serialization.Model): 'restart_partition_result': {'key': 'RestartPartitionResult', 'type': 'RestartPartitionResult'}, } - def __init__( - self, - *, - state: Optional[Union[str, "OperationState"]] = None, - restart_partition_result: Optional["RestartPartitionResult"] = None, - **kwargs - ): + def __init__(self, *, state=None, restart_partition_result=None, **kwargs) -> None: super(PartitionRestartProgress, self).__init__(**kwargs) self.state = state self.restart_partition_result = restart_partition_result class PartitionsHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the partitions of a service, containing health evaluations for each unhealthy partition that impacts current aggregated health state. Can be returned when evaluating service health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for the partitions of a service, containing + health evaluations for each unhealthy partition that impacts current + aggregated health state. Can be returned when evaluating service health and + the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_partitions_per_service: Maximum allowed percentage of unhealthy - partitions per service from the ServiceTypeHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_partitions_per_service: Maximum allowed + percentage of unhealthy partitions per service from the + ServiceTypeHealthPolicy. :type max_percent_unhealthy_partitions_per_service: int - :param total_count: Total number of partitions of the service from the health store. + :param total_count: Total number of partitions of the service from the + health store. 
:type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy PartitionHealthEvaluation that impacted the aggregated - health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + PartitionHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -19156,43 +15975,35 @@ class PartitionsHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - max_percent_unhealthy_partitions_per_service: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_partitions_per_service: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(PartitionsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Partitions' # type: str self.max_percent_unhealthy_partitions_per_service = 
max_percent_unhealthy_partitions_per_service self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Partitions' -class ReplicatorStatus(msrest.serialization.Model): +class ReplicatorStatus(Model): """Represents a base class for primary or secondary replicator status. -Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus. + sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus All required parameters must be populated in order to send to Azure. - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -19207,27 +16018,27 @@ class ReplicatorStatus(msrest.serialization.Model): 'kind': {'Primary': 'PrimaryReplicatorStatus', 'SecondaryReplicatorStatus': 'SecondaryReplicatorStatus'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ReplicatorStatus, self).__init__(**kwargs) - self.kind = None # type: Optional[str] + self.kind = None class PrimaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is functioning in a Primary role. + """Provides statistics about the Service Fabric Replicator, when it is + functioning in a Primary role. All required parameters must be populated in order to send to Azure. - :param kind: Required. 
The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the primary replicator. - :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param remote_replicators: The status of all the active and idle secondary replicators that the - primary is aware of. - :type remote_replicators: list[~azure.servicefabric.models.RemoteReplicatorStatus] + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the primary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param remote_replicators: The status of all the active and idle secondary + replicators that the primary is aware of. + :type remote_replicators: + list[~azure.servicefabric.models.RemoteReplicatorStatus] """ _validation = { @@ -19240,34 +16051,30 @@ class PrimaryReplicatorStatus(ReplicatorStatus): 'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'}, } - def __init__( - self, - *, - replication_queue_status: Optional["ReplicatorQueueStatus"] = None, - remote_replicators: Optional[List["RemoteReplicatorStatus"]] = None, - **kwargs - ): + def __init__(self, *, replication_queue_status=None, remote_replicators=None, **kwargs) -> None: super(PrimaryReplicatorStatus, self).__init__(**kwargs) - self.kind = 'Primary' # type: str self.replication_queue_status = replication_queue_status self.remote_replicators = remote_replicators + self.kind = 'Primary' -class Probe(msrest.serialization.Model): +class Probe(Model): """Probes have a number of fields that you can use to control their behavior. 
- :param initial_delay_seconds: The initial delay in seconds to start executing probe once - codepackage has started. + :param initial_delay_seconds: The initial delay in seconds to start + executing probe once codepackage has started. Default value: 0 . :type initial_delay_seconds: int - :param period_seconds: Periodic seconds to execute probe. + :param period_seconds: Periodic seconds to execute probe. Default value: + 10 . :type period_seconds: int - :param timeout_seconds: Period after which probe is considered as failed if it hasn't completed - successfully. + :param timeout_seconds: Period after which probe is considered as failed + if it hasn't completed successfully. Default value: 1 . :type timeout_seconds: int - :param success_threshold: The count of successful probe executions after which probe is - considered success. + :param success_threshold: The count of successful probe executions after + which probe is considered success. Default value: 1 . :type success_threshold: int - :param failure_threshold: The count of failures after which probe is considered failed. + :param failure_threshold: The count of failures after which probe is + considered failed. Default value: 3 . :type failure_threshold: int :param exec_property: Exec command to run inside the container. 
:type exec_property: ~azure.servicefabric.models.ProbeExec @@ -19288,19 +16095,7 @@ class Probe(msrest.serialization.Model): 'tcp_socket': {'key': 'tcpSocket', 'type': 'ProbeTcpSocket'}, } - def __init__( - self, - *, - initial_delay_seconds: Optional[int] = 0, - period_seconds: Optional[int] = 10, - timeout_seconds: Optional[int] = 1, - success_threshold: Optional[int] = 1, - failure_threshold: Optional[int] = 3, - exec_property: Optional["ProbeExec"] = None, - http_get: Optional["ProbeHttpGet"] = None, - tcp_socket: Optional["ProbeTcpSocket"] = None, - **kwargs - ): + def __init__(self, *, initial_delay_seconds: int=0, period_seconds: int=10, timeout_seconds: int=1, success_threshold: int=1, failure_threshold: int=3, exec_property=None, http_get=None, tcp_socket=None, **kwargs) -> None: super(Probe, self).__init__(**kwargs) self.initial_delay_seconds = initial_delay_seconds self.period_seconds = period_seconds @@ -19312,13 +16107,13 @@ def __init__( self.tcp_socket = tcp_socket -class ProbeExec(msrest.serialization.Model): +class ProbeExec(Model): """Exec command to run inside the container. All required parameters must be populated in order to send to Azure. - :param command: Required. Comma separated command to run inside the container for example "sh, - -c, echo hello world". + :param command: Required. Comma separated command to run inside the + container for example "sh, -c, echo hello world". :type command: str """ @@ -19330,17 +16125,12 @@ class ProbeExec(msrest.serialization.Model): 'command': {'key': 'command', 'type': 'str'}, } - def __init__( - self, - *, - command: str, - **kwargs - ): + def __init__(self, *, command: str, **kwargs) -> None: super(ProbeExec, self).__init__(**kwargs) self.command = command -class ProbeHttpGet(msrest.serialization.Model): +class ProbeHttpGet(Model): """Http probe for the container. All required parameters must be populated in order to send to Azure. 
@@ -19353,8 +16143,8 @@ class ProbeHttpGet(msrest.serialization.Model): :type host: str :param http_headers: Headers to set in the request. :type http_headers: list[~azure.servicefabric.models.ProbeHttpGetHeaders] - :param scheme: Scheme for the http probe. Can be Http or Https. Possible values include: - "http", "https". + :param scheme: Scheme for the http probe. Can be Http or Https. Possible + values include: 'http', 'https' :type scheme: str or ~azure.servicefabric.models.Scheme """ @@ -19370,16 +16160,7 @@ class ProbeHttpGet(msrest.serialization.Model): 'scheme': {'key': 'scheme', 'type': 'str'}, } - def __init__( - self, - *, - port: int, - path: Optional[str] = None, - host: Optional[str] = None, - http_headers: Optional[List["ProbeHttpGetHeaders"]] = None, - scheme: Optional[Union[str, "Scheme"]] = None, - **kwargs - ): + def __init__(self, *, port: int, path: str=None, host: str=None, http_headers=None, scheme=None, **kwargs) -> None: super(ProbeHttpGet, self).__init__(**kwargs) self.port = port self.path = path @@ -19388,7 +16169,7 @@ def __init__( self.scheme = scheme -class ProbeHttpGetHeaders(msrest.serialization.Model): +class ProbeHttpGetHeaders(Model): """Http headers. All required parameters must be populated in order to send to Azure. @@ -19409,19 +16190,13 @@ class ProbeHttpGetHeaders(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - value: str, - **kwargs - ): + def __init__(self, *, name: str, value: str, **kwargs) -> None: super(ProbeHttpGetHeaders, self).__init__(**kwargs) self.name = name self.value = value -class ProbeTcpSocket(msrest.serialization.Model): +class ProbeTcpSocket(Model): """Tcp port to probe inside the container. All required parameters must be populated in order to send to Azure. 
@@ -19438,18 +16213,14 @@ class ProbeTcpSocket(msrest.serialization.Model): 'port': {'key': 'port', 'type': 'int'}, } - def __init__( - self, - *, - port: int, - **kwargs - ): + def __init__(self, *, port: int, **kwargs) -> None: super(ProbeTcpSocket, self).__init__(**kwargs) self.port = port -class PropertyBatchDescriptionList(msrest.serialization.Model): - """Describes a list of property batch operations to be executed. Either all or none of the operations will be committed. +class PropertyBatchDescriptionList(Model): + """Describes a list of property batch operations to be executed. Either all or + none of the operations will be committed. :param operations: A list of the property batch operations to be executed. :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] @@ -19459,25 +16230,20 @@ class PropertyBatchDescriptionList(msrest.serialization.Model): 'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'}, } - def __init__( - self, - *, - operations: Optional[List["PropertyBatchOperation"]] = None, - **kwargs - ): + def __init__(self, *, operations=None, **kwargs) -> None: super(PropertyBatchDescriptionList, self).__init__(**kwargs) self.operations = operations -class PropertyDescription(msrest.serialization.Model): +class PropertyDescription(Model): """Description of a Service Fabric property. All required parameters must be populated in order to send to Azure. :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param custom_type_id: The property's custom type ID. Using this property, the user is able to - tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. Using this property, + the user is able to tag the type of the value of the property. :type custom_type_id: str :param value: Required. Describes a Service Fabric property value. 
:type value: ~azure.servicefabric.models.PropertyValue @@ -19494,21 +16260,14 @@ class PropertyDescription(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__( - self, - *, - property_name: str, - value: "PropertyValue", - custom_type_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, property_name: str, value, custom_type_id: str=None, **kwargs) -> None: super(PropertyDescription, self).__init__(**kwargs) self.property_name = property_name self.custom_type_id = custom_type_id self.value = value -class PropertyInfo(msrest.serialization.Model): +class PropertyInfo(Model): """Information about a Service Fabric property. All required parameters must be populated in order to send to Azure. @@ -19517,8 +16276,8 @@ class PropertyInfo(msrest.serialization.Model): :type name: str :param value: Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param metadata: Required. The metadata associated with a property, including the property's - name. + :param metadata: Required. The metadata associated with a property, + including the property's name. :type metadata: ~azure.servicefabric.models.PropertyMetadata """ @@ -19533,39 +16292,33 @@ class PropertyInfo(msrest.serialization.Model): 'metadata': {'key': 'Metadata', 'type': 'PropertyMetadata'}, } - def __init__( - self, - *, - name: str, - metadata: "PropertyMetadata", - value: Optional["PropertyValue"] = None, - **kwargs - ): + def __init__(self, *, name: str, metadata, value=None, **kwargs) -> None: super(PropertyInfo, self).__init__(**kwargs) self.name = name self.value = value self.metadata = metadata -class PropertyMetadata(msrest.serialization.Model): +class PropertyMetadata(Model): """The metadata associated with a property, including the property's name. - :param type_id: The kind of property, determined by the type of data. Following are the - possible values. 
Possible values include: "Invalid", "Binary", "Int64", "Double", "String", - "Guid". + :param type_id: The kind of property, determined by the type of data. + Following are the possible values. Possible values include: 'Invalid', + 'Binary', 'Int64', 'Double', 'String', 'Guid' :type type_id: str or ~azure.servicefabric.models.PropertyValueKind :param custom_type_id: The property's custom type ID. :type custom_type_id: str - :param parent: The name of the parent Service Fabric Name for the property. It could be thought - of as the name-space/table under which the property exists. + :param parent: The name of the parent Service Fabric Name for the + property. It could be thought of as the name-space/table under which the + property exists. :type parent: str :param size_in_bytes: The length of the serialized property value. :type size_in_bytes: int - :param last_modified_utc_timestamp: Represents when the Property was last modified. Only write - operations will cause this field to be updated. - :type last_modified_utc_timestamp: ~datetime.datetime - :param sequence_number: The version of the property. Every time a property is modified, its - sequence number is increased. + :param last_modified_utc_timestamp: Represents when the Property was last + modified. Only write operations will cause this field to be updated. + :type last_modified_utc_timestamp: datetime + :param sequence_number: The version of the property. Every time a property + is modified, its sequence number is increased. 
:type sequence_number: str """ @@ -19578,17 +16331,7 @@ class PropertyMetadata(msrest.serialization.Model): 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__( - self, - *, - type_id: Optional[Union[str, "PropertyValueKind"]] = None, - custom_type_id: Optional[str] = None, - parent: Optional[str] = None, - size_in_bytes: Optional[int] = None, - last_modified_utc_timestamp: Optional[datetime.datetime] = None, - sequence_number: Optional[str] = None, - **kwargs - ): + def __init__(self, *, type_id=None, custom_type_id: str=None, parent: str=None, size_in_bytes: int=None, last_modified_utc_timestamp=None, sequence_number: str=None, **kwargs) -> None: super(PropertyMetadata, self).__init__(**kwargs) self.type_id = type_id self.custom_type_id = custom_type_id @@ -19599,58 +16342,52 @@ def __init__( class ProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): - """Describes the operation to register or provision an application type using an application package uploaded to the Service Fabric image store. + """Describes the operation to register or provision an application type using + an application package uploaded to the Service Fabric image store. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of application type registration or provision requested. The - application package can be registered or provisioned either from the image store or from an - external store. Following are the kinds of the application type provision.Constant filled by - server. Possible values include: "Invalid", "ImageStorePath", "ExternalStore". - :type kind: str or ~azure.servicefabric.models.ProvisionApplicationTypeKind - :param async_property: Required. Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the request is accepted - by the system, and the provision operation continues without any timeout limit. 
The default - value is false. For large application packages, we recommend setting the value to true. + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool - :param application_type_build_path: Required. The relative path for the application package in - the image store specified during the prior upload operation. + :param kind: Required. Constant filled by server. + :type kind: str + :param application_type_build_path: Required. The relative path for the + application package in the image store specified during the prior upload + operation. :type application_type_build_path: str - :param application_package_cleanup_policy: The kind of action that needs to be taken for - cleaning up the application package after successful provision. Possible values include: - "Invalid", "Default", "Automatic", "Manual". + :param application_package_cleanup_policy: The kind of action that needs + to be taken for cleaning up the application package after successful + provision. 
Possible values include: 'Invalid', 'Default', 'Automatic', + 'Manual' :type application_package_cleanup_policy: str or ~azure.servicefabric.models.ApplicationPackageCleanupPolicy """ _validation = { - 'kind': {'required': True}, 'async_property': {'required': True}, + 'kind': {'required': True}, 'application_type_build_path': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, 'application_package_cleanup_policy': {'key': 'ApplicationPackageCleanupPolicy', 'type': 'str'}, } - def __init__( - self, - *, - async_property: bool, - application_type_build_path: str, - application_package_cleanup_policy: Optional[Union[str, "ApplicationPackageCleanupPolicy"]] = None, - **kwargs - ): + def __init__(self, *, async_property: bool, application_type_build_path: str, application_package_cleanup_policy=None, **kwargs) -> None: super(ProvisionApplicationTypeDescription, self).__init__(async_property=async_property, **kwargs) - self.kind = 'ImageStorePath' # type: str self.application_type_build_path = application_type_build_path self.application_package_cleanup_policy = application_package_cleanup_policy + self.kind = 'ImageStorePath' -class ProvisionFabricDescription(msrest.serialization.Model): +class ProvisionFabricDescription(Model): """Describes the parameters for provisioning a cluster. :param code_file_path: The cluster code package file path. 
@@ -19664,13 +16401,7 @@ class ProvisionFabricDescription(msrest.serialization.Model): 'cluster_manifest_file_path': {'key': 'ClusterManifestFilePath', 'type': 'str'}, } - def __init__( - self, - *, - code_file_path: Optional[str] = None, - cluster_manifest_file_path: Optional[str] = None, - **kwargs - ): + def __init__(self, *, code_file_path: str=None, cluster_manifest_file_path: str=None, **kwargs) -> None: super(ProvisionFabricDescription, self).__init__(**kwargs) self.code_file_path = code_file_path self.cluster_manifest_file_path = cluster_manifest_file_path @@ -19678,68 +16409,66 @@ def __init__( class PutPropertyBatchOperation(PropertyBatchOperation): """Puts the specified property under the specified name. -Note that if one PropertyBatchOperation in a PropertyBatch fails, -the entire batch fails and cannot be committed in a transactional manner. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch operation, determined by the operation to be - performed. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Put", "Get", "CheckExists", "CheckSequence", "Delete", "CheckValue". - :type kind: str or ~azure.servicefabric.models.PropertyBatchOperationKind :param property_name: Required. The name of the Service Fabric property. :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param custom_type_id: The property's custom type ID. Using this property, the user is able to - tag the type of the value of the property. + :param custom_type_id: The property's custom type ID. 
Using this property, + the user is able to tag the type of the value of the property. :type custom_type_id: str """ _validation = { - 'kind': {'required': True}, 'property_name': {'required': True}, + 'kind': {'required': True}, 'value': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'PropertyValue'}, 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, } - def __init__( - self, - *, - property_name: str, - value: "PropertyValue", - custom_type_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, property_name: str, value, custom_type_id: str=None, **kwargs) -> None: super(PutPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) - self.kind = 'Put' # type: str self.value = value self.custom_type_id = custom_type_id - - -class ReconfigurationInformation(msrest.serialization.Model): - """Information about current reconfiguration like phase, type, previous configuration role of replica and reconfiguration start date time. - - :param previous_configuration_role: Replica role before reconfiguration started. Possible - values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type previous_configuration_role: str or ~azure.servicefabric.models.ReplicaRole - :param reconfiguration_phase: Current phase of ongoing reconfiguration. If no reconfiguration - is taking place then this value will be "None". Possible values include: "Unknown", "None", - "Phase0", "Phase1", "Phase2", "Phase3", "Phase4", "AbortPhaseZero". - :type reconfiguration_phase: str or ~azure.servicefabric.models.ReconfigurationPhase - :param reconfiguration_type: Type of current ongoing reconfiguration. If no reconfiguration is - taking place then this value will be "None". Possible values include: "Unknown", "SwapPrimary", - "Failover", "Other". 
- :type reconfiguration_type: str or ~azure.servicefabric.models.ReconfigurationType - :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing reconfiguration. If - no reconfiguration is taking place then this value will be zero date-time. - :type reconfiguration_start_time_utc: ~datetime.datetime + self.kind = 'Put' + + +class ReconfigurationInformation(Model): + """Information about current reconfiguration like phase, type, previous + configuration role of replica and reconfiguration start date time. + + :param previous_configuration_role: Replica role before reconfiguration + started. Possible values include: 'Unknown', 'None', 'Primary', + 'IdleSecondary', 'ActiveSecondary' + :type previous_configuration_role: str or + ~azure.servicefabric.models.ReplicaRole + :param reconfiguration_phase: Current phase of ongoing reconfiguration. If + no reconfiguration is taking place then this value will be "None". + Possible values include: 'Unknown', 'None', 'Phase0', 'Phase1', 'Phase2', + 'Phase3', 'Phase4', 'AbortPhaseZero' + :type reconfiguration_phase: str or + ~azure.servicefabric.models.ReconfigurationPhase + :param reconfiguration_type: Type of current ongoing reconfiguration. If + no reconfiguration is taking place then this value will be "None". + Possible values include: 'Unknown', 'SwapPrimary', 'Failover', 'Other' + :type reconfiguration_type: str or + ~azure.servicefabric.models.ReconfigurationType + :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing + reconfiguration. If no reconfiguration is taking place then this value + will be zero date-time. 
+ :type reconfiguration_start_time_utc: datetime """ _attribute_map = { @@ -19749,15 +16478,7 @@ class ReconfigurationInformation(msrest.serialization.Model): 'reconfiguration_start_time_utc': {'key': 'ReconfigurationStartTimeUtc', 'type': 'iso-8601'}, } - def __init__( - self, - *, - previous_configuration_role: Optional[Union[str, "ReplicaRole"]] = None, - reconfiguration_phase: Optional[Union[str, "ReconfigurationPhase"]] = None, - reconfiguration_type: Optional[Union[str, "ReconfigurationType"]] = None, - reconfiguration_start_time_utc: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, previous_configuration_role=None, reconfiguration_phase=None, reconfiguration_type=None, reconfiguration_start_time_utc=None, **kwargs) -> None: super(ReconfigurationInformation, self).__init__(**kwargs) self.previous_configuration_role = previous_configuration_role self.reconfiguration_phase = reconfiguration_phase @@ -19765,14 +16486,16 @@ def __init__( self.reconfiguration_start_time_utc = reconfiguration_start_time_utc -class RegistryCredential(msrest.serialization.Model): +class RegistryCredential(Model): """Credential information to connect to container registry. :param registry_user_name: The user name to connect to container registry. :type registry_user_name: str - :param registry_password: The password for supplied username to connect to container registry. + :param registry_password: The password for supplied username to connect to + container registry. :type registry_password: str - :param password_encrypted: Indicates that supplied container registry password is encrypted. + :param password_encrypted: Indicates that supplied container registry + password is encrypted. 
:type password_encrypted: bool """ @@ -19782,31 +16505,25 @@ class RegistryCredential(msrest.serialization.Model): 'password_encrypted': {'key': 'PasswordEncrypted', 'type': 'bool'}, } - def __init__( - self, - *, - registry_user_name: Optional[str] = None, - registry_password: Optional[str] = None, - password_encrypted: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, registry_user_name: str=None, registry_password: str=None, password_encrypted: bool=None, **kwargs) -> None: super(RegistryCredential, self).__init__(**kwargs) self.registry_user_name = registry_user_name self.registry_password = registry_password self.password_encrypted = password_encrypted -class ReliableCollectionsRef(msrest.serialization.Model): +class ReliableCollectionsRef(Model): """Specifying this parameter adds support for reliable collections. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of ReliableCollection resource. Right now it's not used and you can - use any string. + :param name: Required. Name of ReliableCollection resource. Right now it's + not used and you can use any string. :type name: str - :param do_not_persist_state: False (the default) if ReliableCollections state is persisted to - disk as usual. True if you do not want to persist state, in which case replication is still - enabled and you can use ReliableCollections as distributed cache. + :param do_not_persist_state: False (the default) if ReliableCollections + state is persisted to disk as usual. True if you do not want to persist + state, in which case replication is still enabled and you can use + ReliableCollections as distributed cache. 
:type do_not_persist_state: bool """ @@ -19819,32 +16536,28 @@ class ReliableCollectionsRef(msrest.serialization.Model): 'do_not_persist_state': {'key': 'doNotPersistState', 'type': 'bool'}, } - def __init__( - self, - *, - name: str, - do_not_persist_state: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, name: str, do_not_persist_state: bool=None, **kwargs) -> None: super(ReliableCollectionsRef, self).__init__(**kwargs) self.name = name self.do_not_persist_state = do_not_persist_state -class RemoteReplicatorAcknowledgementDetail(msrest.serialization.Model): - """Provides various statistics of the acknowledgements that are being received from the remote replicator. +class RemoteReplicatorAcknowledgementDetail(Model): + """Provides various statistics of the acknowledgements that are being received + from the remote replicator. - :param average_receive_duration: Represents the average duration it takes for the remote - replicator to receive an operation. + :param average_receive_duration: Represents the average duration it takes + for the remote replicator to receive an operation. :type average_receive_duration: str - :param average_apply_duration: Represents the average duration it takes for the remote - replicator to apply an operation. This usually entails writing the operation to disk. + :param average_apply_duration: Represents the average duration it takes + for the remote replicator to apply an operation. This usually entails + writing the operation to disk. :type average_apply_duration: str - :param not_received_count: Represents the number of operations not yet received by a remote - replicator. + :param not_received_count: Represents the number of operations not yet + received by a remote replicator. :type not_received_count: str - :param received_and_not_applied_count: Represents the number of operations received and not yet - applied by a remote replicator. 
+ :param received_and_not_applied_count: Represents the number of operations + received and not yet applied by a remote replicator. :type received_and_not_applied_count: str """ @@ -19855,15 +16568,7 @@ class RemoteReplicatorAcknowledgementDetail(msrest.serialization.Model): 'received_and_not_applied_count': {'key': 'ReceivedAndNotAppliedCount', 'type': 'str'}, } - def __init__( - self, - *, - average_receive_duration: Optional[str] = None, - average_apply_duration: Optional[str] = None, - not_received_count: Optional[str] = None, - received_and_not_applied_count: Optional[str] = None, - **kwargs - ): + def __init__(self, *, average_receive_duration: str=None, average_apply_duration: str=None, not_received_count: str=None, received_and_not_applied_count: str=None, **kwargs) -> None: super(RemoteReplicatorAcknowledgementDetail, self).__init__(**kwargs) self.average_receive_duration = average_receive_duration self.average_apply_duration = average_apply_duration @@ -19871,15 +16576,17 @@ def __init__( self.received_and_not_applied_count = received_and_not_applied_count -class RemoteReplicatorAcknowledgementStatus(msrest.serialization.Model): - """Provides details about the remote replicators from the primary replicator's point of view. +class RemoteReplicatorAcknowledgementStatus(Model): + """Provides details about the remote replicators from the primary replicator's + point of view. - :param replication_stream_acknowledgement_detail: Details about the acknowledgements for - operations that are part of the replication stream data. + :param replication_stream_acknowledgement_detail: Details about the + acknowledgements for operations that are part of the replication stream + data. :type replication_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail - :param copy_stream_acknowledgement_detail: Details about the acknowledgements for operations - that are part of the copy stream data. 
+ :param copy_stream_acknowledgement_detail: Details about the + acknowledgements for operations that are part of the copy stream data. :type copy_stream_acknowledgement_detail: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail """ @@ -19889,48 +16596,46 @@ class RemoteReplicatorAcknowledgementStatus(msrest.serialization.Model): 'copy_stream_acknowledgement_detail': {'key': 'CopyStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, } - def __init__( - self, - *, - replication_stream_acknowledgement_detail: Optional["RemoteReplicatorAcknowledgementDetail"] = None, - copy_stream_acknowledgement_detail: Optional["RemoteReplicatorAcknowledgementDetail"] = None, - **kwargs - ): + def __init__(self, *, replication_stream_acknowledgement_detail=None, copy_stream_acknowledgement_detail=None, **kwargs) -> None: super(RemoteReplicatorAcknowledgementStatus, self).__init__(**kwargs) self.replication_stream_acknowledgement_detail = replication_stream_acknowledgement_detail self.copy_stream_acknowledgement_detail = copy_stream_acknowledgement_detail -class RemoteReplicatorStatus(msrest.serialization.Model): - """Represents the state of the secondary replicator from the primary replicator’s point of view. +class RemoteReplicatorStatus(Model): + """Represents the state of the secondary replicator from the primary + replicator’s point of view. - :param replica_id: Represents the replica ID of the remote secondary replicator. + :param replica_id: Represents the replica ID of the remote secondary + replicator. :type replica_id: str - :param last_acknowledgement_processed_time_utc: The last timestamp (in UTC) when an - acknowledgement from the secondary replicator was processed on the primary. - UTC 0 represents an invalid value, indicating that no acknowledgement messages were ever - processed. 
- :type last_acknowledgement_processed_time_utc: ~datetime.datetime - :param last_received_replication_sequence_number: The highest replication operation sequence - number that the secondary has received from the primary. + :param last_acknowledgement_processed_time_utc: The last timestamp (in + UTC) when an acknowledgement from the secondary replicator was processed + on the primary. + UTC 0 represents an invalid value, indicating that no acknowledgement + messages were ever processed. + :type last_acknowledgement_processed_time_utc: datetime + :param last_received_replication_sequence_number: The highest replication + operation sequence number that the secondary has received from the + primary. :type last_received_replication_sequence_number: str - :param last_applied_replication_sequence_number: The highest replication operation sequence - number that the secondary has applied to its state. + :param last_applied_replication_sequence_number: The highest replication + operation sequence number that the secondary has applied to its state. :type last_applied_replication_sequence_number: str - :param is_in_build: A value that indicates whether the secondary replica is in the process of - being built. + :param is_in_build: A value that indicates whether the secondary replica + is in the process of being built. :type is_in_build: bool - :param last_received_copy_sequence_number: The highest copy operation sequence number that the - secondary has received from the primary. + :param last_received_copy_sequence_number: The highest copy operation + sequence number that the secondary has received from the primary. A value of -1 implies that the secondary has received all copy operations. :type last_received_copy_sequence_number: str - :param last_applied_copy_sequence_number: The highest copy operation sequence number that the - secondary has applied to its state. - A value of -1 implies that the secondary has applied all copy operations and the copy process - is complete. 
+ :param last_applied_copy_sequence_number: The highest copy operation + sequence number that the secondary has applied to its state. + A value of -1 implies that the secondary has applied all copy operations + and the copy process is complete. :type last_applied_copy_sequence_number: str - :param remote_replicator_acknowledgement_status: Represents the acknowledgment status for the - remote secondary replicator. + :param remote_replicator_acknowledgement_status: Represents the + acknowledgment status for the remote secondary replicator. :type remote_replicator_acknowledgement_status: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementStatus """ @@ -19946,19 +16651,7 @@ class RemoteReplicatorStatus(msrest.serialization.Model): 'remote_replicator_acknowledgement_status': {'key': 'RemoteReplicatorAcknowledgementStatus', 'type': 'RemoteReplicatorAcknowledgementStatus'}, } - def __init__( - self, - *, - replica_id: Optional[str] = None, - last_acknowledgement_processed_time_utc: Optional[datetime.datetime] = None, - last_received_replication_sequence_number: Optional[str] = None, - last_applied_replication_sequence_number: Optional[str] = None, - is_in_build: Optional[bool] = None, - last_received_copy_sequence_number: Optional[str] = None, - last_applied_copy_sequence_number: Optional[str] = None, - remote_replicator_acknowledgement_status: Optional["RemoteReplicatorAcknowledgementStatus"] = None, - **kwargs - ): + def __init__(self, *, replica_id: str=None, last_acknowledgement_processed_time_utc=None, last_received_replication_sequence_number: str=None, last_applied_replication_sequence_number: str=None, is_in_build: bool=None, last_received_copy_sequence_number: str=None, last_applied_copy_sequence_number: str=None, remote_replicator_acknowledgement_status=None, **kwargs) -> None: super(RemoteReplicatorStatus, self).__init__(**kwargs) self.replica_id = replica_id self.last_acknowledgement_processed_time_utc = last_acknowledgement_processed_time_utc @@ 
-19970,87 +16663,95 @@ def __init__( self.remote_replicator_acknowledgement_status = remote_replicator_acknowledgement_status -class RepairTask(msrest.serialization.Model): - """Represents a repair task, which includes information about what kind of repair was requested, what its progress is, and what its final result was. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. +class RepairTask(Model): + """Represents a repair task, which includes information about what kind of + repair was requested, what its progress is, and what its final result was. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str :param version: The version of the repair task. - When creating a new repair task, the version must be set to zero. When updating a repair - task, + When creating a new repair task, the version must be set to zero. When + updating a repair task, the version is used for optimistic concurrency checks. If the version is - set to zero, the update will not check for write conflicts. If the version is set to a - non-zero value, then the - update will only succeed if the actual current version of the repair task matches this value. + set to zero, the update will not check for write conflicts. If the + version is set to a non-zero value, then the + update will only succeed if the actual current version of the repair task + matches this value. :type version: str - :param description: A description of the purpose of the repair task, or other informational - details. + :param description: A description of the purpose of the repair task, or + other informational details. May be set when the repair task is created, and is immutable once set. :type description: str - :param state: Required. The workflow state of the repair task. 
Valid initial states are - Created, Claimed, and Preparing. Possible values include: "Invalid", "Created", "Claimed", - "Preparing", "Approved", "Executing", "Restoring", "Completed". + :param state: Required. The workflow state of the repair task. Valid + initial states are Created, Claimed, and Preparing. Possible values + include: 'Invalid', 'Created', 'Claimed', 'Preparing', 'Approved', + 'Executing', 'Restoring', 'Completed' :type state: str or ~azure.servicefabric.models.State - :param flags: A bitwise-OR of the following values, which gives additional details about the - status of the repair task. - - - * 1 - Cancellation of the repair has been requested - * 2 - Abort of the repair has been requested - * 4 - Approval of the repair was forced via client request. + :param flags: A bitwise-OR of the following values, which gives additional + details about the status of the repair task. + - 1 - Cancellation of the repair has been requested + - 2 - Abort of the repair has been requested + - 4 - Approval of the repair was forced via client request :type flags: int - :param action: Required. The requested repair action. Must be specified when the repair task is - created, and is immutable once set. + :param action: Required. The requested repair action. Must be specified + when the repair task is created, and is immutable once set. :type action: str - :param target: The target object determines what actions the system will take to prepare for - the impact of the repair, prior to approving execution of the repair. + :param target: The target object determines what actions the system will + take to prepare for the impact of the repair, prior to approving execution + of the repair. May be set when the repair task is created, and is immutable once set. :type target: ~azure.servicefabric.models.RepairTargetDescriptionBase - :param executor: The name of the repair executor. Must be specified in Claimed and later - states, and is immutable once set. 
+ :param executor: The name of the repair executor. Must be specified in + Claimed and later states, and is immutable once set. :type executor: str - :param executor_data: A data string that the repair executor can use to store its internal - state. + :param executor_data: A data string that the repair executor can use to + store its internal state. :type executor_data: str - :param impact: The impact object determines what actions the system will take to prepare for - the impact of the repair, prior to approving execution of the repair. - Impact must be specified by the repair executor when transitioning to the Preparing state, and - is immutable once set. + :param impact: The impact object determines what actions the system will + take to prepare for the impact of the repair, prior to approving execution + of the repair. + Impact must be specified by the repair executor when transitioning to the + Preparing state, and is immutable once set. :type impact: ~azure.servicefabric.models.RepairImpactDescriptionBase - :param result_status: A value describing the overall result of the repair task execution. Must - be specified in the Restoring and later states, and is immutable once set. Possible values - include: "Invalid", "Succeeded", "Cancelled", "Interrupted", "Failed", "Pending". + :param result_status: A value describing the overall result of the repair + task execution. Must be specified in the Restoring and later states, and + is immutable once set. Possible values include: 'Invalid', 'Succeeded', + 'Cancelled', 'Interrupted', 'Failed', 'Pending' :type result_status: str or ~azure.servicefabric.models.ResultStatus - :param result_code: A numeric value providing additional details about the result of the repair - task execution. - May be specified in the Restoring and later states, and is immutable once set. + :param result_code: A numeric value providing additional details about the + result of the repair task execution. 
+ May be specified in the Restoring and later states, and is immutable once + set. :type result_code: int - :param result_details: A string providing additional details about the result of the repair - task execution. - May be specified in the Restoring and later states, and is immutable once set. + :param result_details: A string providing additional details about the + result of the repair task execution. + May be specified in the Restoring and later states, and is immutable once + set. :type result_details: str - :param history: An object that contains timestamps of the repair task's state transitions. - These timestamps are updated by the system, and cannot be directly modified. + :param history: An object that contains timestamps of the repair task's + state transitions. + These timestamps are updated by the system, and cannot be directly + modified. :type history: ~azure.servicefabric.models.RepairTaskHistory - :param preparing_health_check_state: The workflow state of the health check when the repair - task is in the Preparing state. Possible values include: "NotStarted", "InProgress", - "Succeeded", "Skipped", "TimedOut". + :param preparing_health_check_state: The workflow state of the health + check when the repair task is in the Preparing state. Possible values + include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' :type preparing_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param restoring_health_check_state: The workflow state of the health check when the repair - task is in the Restoring state. Possible values include: "NotStarted", "InProgress", - "Succeeded", "Skipped", "TimedOut". + :param restoring_health_check_state: The workflow state of the health + check when the repair task is in the Restoring state. 
Possible values + include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' :type restoring_health_check_state: str or ~azure.servicefabric.models.RepairTaskHealthCheckState - :param perform_preparing_health_check: A value to determine if health checks will be performed - when the repair task enters the Preparing state. + :param perform_preparing_health_check: A value to determine if health + checks will be performed when the repair task enters the Preparing state. :type perform_preparing_health_check: bool - :param perform_restoring_health_check: A value to determine if health checks will be performed - when the repair task enters the Restoring state. + :param perform_restoring_health_check: A value to determine if health + checks will be performed when the repair task enters the Restoring state. :type perform_restoring_health_check: bool """ @@ -20081,29 +16782,7 @@ class RepairTask(msrest.serialization.Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__( - self, - *, - task_id: str, - state: Union[str, "State"], - action: str, - version: Optional[str] = None, - description: Optional[str] = None, - flags: Optional[int] = None, - target: Optional["RepairTargetDescriptionBase"] = None, - executor: Optional[str] = None, - executor_data: Optional[str] = None, - impact: Optional["RepairImpactDescriptionBase"] = None, - result_status: Optional[Union[str, "ResultStatus"]] = None, - result_code: Optional[int] = None, - result_details: Optional[str] = None, - history: Optional["RepairTaskHistory"] = None, - preparing_health_check_state: Optional[Union[str, "RepairTaskHealthCheckState"]] = None, - restoring_health_check_state: Optional[Union[str, "RepairTaskHealthCheckState"]] = None, - perform_preparing_health_check: Optional[bool] = None, - perform_restoring_health_check: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, task_id: str, state, action: str, version: str=None, 
description: str=None, flags: int=None, target=None, executor: str=None, executor_data: str=None, impact=None, result_status=None, result_code: int=None, result_details: str=None, history=None, preparing_health_check_state=None, restoring_health_check_state=None, perform_preparing_health_check: bool=None, perform_restoring_health_check: bool=None, **kwargs) -> None: super(RepairTask, self).__init__(**kwargs) self.task_id = task_id self.version = version @@ -20125,18 +16804,19 @@ def __init__( self.perform_restoring_health_check = perform_restoring_health_check -class RepairTaskApproveDescription(msrest.serialization.Model): +class RepairTaskApproveDescription(Model): """Describes a request for forced approval of a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. 
:type version: str """ @@ -20149,34 +16829,29 @@ class RepairTaskApproveDescription(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - *, - task_id: str, - version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, task_id: str, version: str=None, **kwargs) -> None: super(RepairTaskApproveDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version -class RepairTaskCancelDescription(msrest.serialization.Model): +class RepairTaskCancelDescription(Model): """Describes a request to cancel a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. :type version: str - :param request_abort: *True* if the repair should be stopped as soon as possible even if it has - already started executing. *False* if the repair should be cancelled only if execution has not - yet started. + :param request_abort: _True_ if the repair should be stopped as soon as + possible even if it has already started executing. _False_ if the repair + should be cancelled only if execution has not yet started. 
:type request_abort: bool """ @@ -20190,32 +16865,27 @@ class RepairTaskCancelDescription(msrest.serialization.Model): 'request_abort': {'key': 'RequestAbort', 'type': 'bool'}, } - def __init__( - self, - *, - task_id: str, - version: Optional[str] = None, - request_abort: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, task_id: str, version: str=None, request_abort: bool=None, **kwargs) -> None: super(RepairTaskCancelDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version self.request_abort = request_abort -class RepairTaskDeleteDescription(msrest.serialization.Model): +class RepairTaskDeleteDescription(Model): """Describes a request to delete a completed repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. - :param task_id: Required. The ID of the completed repair task to be deleted. + :param task_id: Required. The ID of the completed repair task to be + deleted. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. 
:type version: str """ @@ -20228,49 +16898,50 @@ class RepairTaskDeleteDescription(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - *, - task_id: str, - version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, task_id: str, version: str=None, **kwargs) -> None: super(RepairTaskDeleteDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version -class RepairTaskHistory(msrest.serialization.Model): +class RepairTaskHistory(Model): """A record of the times when the repair task entered each state. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. - - :param created_utc_timestamp: The time when the repair task entered the Created state. - :type created_utc_timestamp: ~datetime.datetime - :param claimed_utc_timestamp: The time when the repair task entered the Claimed state. - :type claimed_utc_timestamp: ~datetime.datetime - :param preparing_utc_timestamp: The time when the repair task entered the Preparing state. - :type preparing_utc_timestamp: ~datetime.datetime - :param approved_utc_timestamp: The time when the repair task entered the Approved state. - :type approved_utc_timestamp: ~datetime.datetime - :param executing_utc_timestamp: The time when the repair task entered the Executing state. - :type executing_utc_timestamp: ~datetime.datetime - :param restoring_utc_timestamp: The time when the repair task entered the Restoring state. - :type restoring_utc_timestamp: ~datetime.datetime - :param completed_utc_timestamp: The time when the repair task entered the Completed state. - :type completed_utc_timestamp: ~datetime.datetime - :param preparing_health_check_start_utc_timestamp: The time when the repair task started the - health check in the Preparing state. 
- :type preparing_health_check_start_utc_timestamp: ~datetime.datetime - :param preparing_health_check_end_utc_timestamp: The time when the repair task completed the - health check in the Preparing state. - :type preparing_health_check_end_utc_timestamp: ~datetime.datetime - :param restoring_health_check_start_utc_timestamp: The time when the repair task started the - health check in the Restoring state. - :type restoring_health_check_start_utc_timestamp: ~datetime.datetime - :param restoring_health_check_end_utc_timestamp: The time when the repair task completed the - health check in the Restoring state. - :type restoring_health_check_end_utc_timestamp: ~datetime.datetime + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + :param created_utc_timestamp: The time when the repair task entered the + Created state. + :type created_utc_timestamp: datetime + :param claimed_utc_timestamp: The time when the repair task entered the + Claimed state. + :type claimed_utc_timestamp: datetime + :param preparing_utc_timestamp: The time when the repair task entered the + Preparing state. + :type preparing_utc_timestamp: datetime + :param approved_utc_timestamp: The time when the repair task entered the + Approved state + :type approved_utc_timestamp: datetime + :param executing_utc_timestamp: The time when the repair task entered the + Executing state + :type executing_utc_timestamp: datetime + :param restoring_utc_timestamp: The time when the repair task entered the + Restoring state + :type restoring_utc_timestamp: datetime + :param completed_utc_timestamp: The time when the repair task entered the + Completed state + :type completed_utc_timestamp: datetime + :param preparing_health_check_start_utc_timestamp: The time when the + repair task started the health check in the Preparing state. 
+ :type preparing_health_check_start_utc_timestamp: datetime + :param preparing_health_check_end_utc_timestamp: The time when the repair + task completed the health check in the Preparing state. + :type preparing_health_check_end_utc_timestamp: datetime + :param restoring_health_check_start_utc_timestamp: The time when the + repair task started the health check in the Restoring state. + :type restoring_health_check_start_utc_timestamp: datetime + :param restoring_health_check_end_utc_timestamp: The time when the repair + task completed the health check in the Restoring state. + :type restoring_health_check_end_utc_timestamp: datetime """ _attribute_map = { @@ -20287,22 +16958,7 @@ class RepairTaskHistory(msrest.serialization.Model): 'restoring_health_check_end_utc_timestamp': {'key': 'RestoringHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - created_utc_timestamp: Optional[datetime.datetime] = None, - claimed_utc_timestamp: Optional[datetime.datetime] = None, - preparing_utc_timestamp: Optional[datetime.datetime] = None, - approved_utc_timestamp: Optional[datetime.datetime] = None, - executing_utc_timestamp: Optional[datetime.datetime] = None, - restoring_utc_timestamp: Optional[datetime.datetime] = None, - completed_utc_timestamp: Optional[datetime.datetime] = None, - preparing_health_check_start_utc_timestamp: Optional[datetime.datetime] = None, - preparing_health_check_end_utc_timestamp: Optional[datetime.datetime] = None, - restoring_health_check_start_utc_timestamp: Optional[datetime.datetime] = None, - restoring_health_check_end_utc_timestamp: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, created_utc_timestamp=None, claimed_utc_timestamp=None, preparing_utc_timestamp=None, approved_utc_timestamp=None, executing_utc_timestamp=None, restoring_utc_timestamp=None, completed_utc_timestamp=None, preparing_health_check_start_utc_timestamp=None, preparing_health_check_end_utc_timestamp=None, 
restoring_health_check_start_utc_timestamp=None, restoring_health_check_end_utc_timestamp=None, **kwargs) -> None: super(RepairTaskHistory, self).__init__(**kwargs) self.created_utc_timestamp = created_utc_timestamp self.claimed_utc_timestamp = claimed_utc_timestamp @@ -20317,26 +16973,29 @@ def __init__( self.restoring_health_check_end_utc_timestamp = restoring_health_check_end_utc_timestamp -class RepairTaskUpdateHealthPolicyDescription(msrest.serialization.Model): +class RepairTaskUpdateHealthPolicyDescription(Model): """Describes a request to update the health policy of a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. :param task_id: Required. The ID of the repair task to be updated. :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current value of the repair task. If zero, - then no version check is performed. + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current value of the repair task. If zero, then no version check is + performed. :type version: str - :param perform_preparing_health_check: A boolean indicating if health check is to be performed - in the Preparing stage of the repair task. If not specified the existing value should not be - altered. Otherwise, specify the desired new value. + :param perform_preparing_health_check: A boolean indicating if health + check is to be performed in the Preparing stage of the repair task. If not + specified the existing value should not be altered. Otherwise, specify the + desired new value. 
:type perform_preparing_health_check: bool - :param perform_restoring_health_check: A boolean indicating if health check is to be performed - in the Restoring stage of the repair task. If not specified the existing value should not be - altered. Otherwise, specify the desired new value. + :param perform_restoring_health_check: A boolean indicating if health + check is to be performed in the Restoring stage of the repair task. If not + specified the existing value should not be altered. Otherwise, specify the + desired new value. :type perform_restoring_health_check: bool """ @@ -20351,15 +17010,7 @@ class RepairTaskUpdateHealthPolicyDescription(msrest.serialization.Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__( - self, - *, - task_id: str, - version: Optional[str] = None, - perform_preparing_health_check: Optional[bool] = None, - perform_restoring_health_check: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, task_id: str, version: str=None, perform_preparing_health_check: bool=None, perform_restoring_health_check: bool=None, **kwargs) -> None: super(RepairTaskUpdateHealthPolicyDescription, self).__init__(**kwargs) self.task_id = task_id self.version = version @@ -20367,10 +17018,10 @@ def __init__( self.perform_restoring_health_check = perform_restoring_health_check -class RepairTaskUpdateInfo(msrest.serialization.Model): +class RepairTaskUpdateInfo(Model): """Describes the result of an operation that created or updated a repair task. - -This type supports the Service Fabric platform; it is not meant to be used directly from your code. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. All required parameters must be populated in order to send to Azure. 
@@ -20386,45 +17037,44 @@ class RepairTaskUpdateInfo(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - *, - version: str, - **kwargs - ): + def __init__(self, *, version: str, **kwargs) -> None: super(RepairTaskUpdateInfo, self).__init__(**kwargs) self.version = version class ReplicaHealth(EntityHealth): - """Represents a base class for stateful service replica or stateless service instance health. -Contains the replica aggregated health state, the health events and the unhealthy evaluations. + """Represents a base class for stateful service replica or stateless service + instance health. + Contains the replica aggregated health state, the health events and the + unhealthy evaluations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaHealth, StatelessServiceInstanceHealth. + sub-classes are: StatefulServiceReplicaHealth, + StatelessServiceInstanceHealth All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -20436,66 +17086,58 @@ class ReplicaHealth(EntityHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealth', 'Stateless': 'StatelessServiceInstanceHealth'} } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, **kwargs) -> None: super(ReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) - self.service_kind = 'ReplicaHealth' # type: str self.partition_id = partition_id + self.service_kind = None + self.service_kind = 'ReplicaHealth' class ReplicaHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a replica, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. 
The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a replica, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. 
:type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param partition_id: Id of the partition to which the replica belongs. :type partition_id: str - :param replica_or_instance_id: Id of a stateful service replica or a stateless service - instance. This ID is used in the queries that apply to both stateful and stateless services. It - is used by Service Fabric to uniquely identify a replica of a partition of a stateful service - or an instance of a stateless service partition. It is unique within a partition and does not - change for the lifetime of the replica or the instance. If a stateful replica gets dropped and - another replica gets created on the same node for the same partition, it will get a different - value for the ID. If a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a + stateless service instance. This ID is used in the queries that apply to + both stateful and stateless services. It is used by Service Fabric to + uniquely identify a replica of a partition of a stateful service or an + instance of a stateless service partition. It is unique within a partition + and does not change for the lifetime of the replica or the instance. If a + stateful replica gets dropped and another replica gets created on the same + node for the same partition, it will get a different value for the ID. If + a stateless instance is failed over on the same or different node it will get a different value for the ID. :type replica_or_instance_id: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the replica. The types of the unhealthy evaluations can be - EventHealthEvaluation. 
- :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the replica. The types of the + unhealthy evaluations can be EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -20503,48 +17145,43 @@ class ReplicaHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - partition_id: Optional[str] = None, - replica_or_instance_id: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, partition_id: str=None, replica_or_instance_id: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(ReplicaHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Replica' # type: str self.partition_id = partition_id self.replica_or_instance_id = replica_or_instance_id self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Replica' class ReplicaHealthState(EntityHealthState): - """Represents a base class for stateful service replica or stateless service instance health state. 
+ """Represents a base class for stateful service replica or stateless service + instance health state. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaHealthState, StatelessServiceInstanceHealthState. + sub-classes are: StatefulServiceReplicaHealthState, + StatelessServiceInstanceHealthState All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param partition_id: The ID of the partition to which this replica belongs. + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. :type partition_id: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -20553,41 +17190,40 @@ class ReplicaHealthState(EntityHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'} } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, **kwargs) -> None: super(ReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) - self.service_kind = 'ReplicaHealthState' # type: str self.partition_id = partition_id + self.service_kind = None + self.service_kind = 'ReplicaHealthState' class ReplicaHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a stateful service replica or a stateless service instance. -The replica health state contains the replica ID and its aggregated health state. - - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + """Represents the health state chunk of a stateful service replica or a + stateless service instance. + The replica health state contains the replica ID and its aggregated health + state. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param replica_or_instance_id: Id of a stateful service replica or a stateless service - instance. This ID is used in the queries that apply to both stateful and stateless services. It - is used by Service Fabric to uniquely identify a replica of a partition of a stateful service - or an instance of a stateless service partition. It is unique within a partition and does not - change for the lifetime of the replica or the instance. If a stateful replica gets dropped and - another replica gets created on the same node for the same partition, it will get a different - value for the ID. If a stateless instance is failed over on the same or different node it will + :param replica_or_instance_id: Id of a stateful service replica or a + stateless service instance. This ID is used in the queries that apply to + both stateful and stateless services. It is used by Service Fabric to + uniquely identify a replica of a partition of a stateful service or an + instance of a stateless service partition. It is unique within a partition + and does not change for the lifetime of the replica or the instance. If a + stateful replica gets dropped and another replica gets created on the same + node for the same partition, it will get a different value for the ID. If + a stateless instance is failed over on the same or different node it will get a different value for the ID. 
:type replica_or_instance_id: str """ @@ -20597,22 +17233,17 @@ class ReplicaHealthStateChunk(EntityHealthStateChunk): 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - replica_or_instance_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, health_state=None, replica_or_instance_id: str=None, **kwargs) -> None: super(ReplicaHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.replica_or_instance_id = replica_or_instance_id -class ReplicaHealthStateChunkList(msrest.serialization.Model): - """The list of replica health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class ReplicaHealthStateChunkList(Model): + """The list of replica health state chunks that respect the input filters in + the chunk query. Returned by get cluster health state chunks query. - :param items: The list of replica health state chunks that respect the input filters in the - chunk query. + :param items: The list of replica health state chunks that respect the + input filters in the chunk query. :type items: list[~azure.servicefabric.models.ReplicaHealthStateChunk] """ @@ -20620,51 +17251,56 @@ class ReplicaHealthStateChunkList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ReplicaHealthStateChunk]'}, } - def __init__( - self, - *, - items: Optional[List["ReplicaHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, items=None, **kwargs) -> None: super(ReplicaHealthStateChunkList, self).__init__(**kwargs) self.items = items -class ReplicaHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a replica should be included as a child of a partition in the cluster health chunk. -The replicas are only returned if the parent entities match a filter specified in the cluster health chunk query description. 
The parent partition, service and application must be included in the cluster health chunk. -One filter can match zero, one or multiple replicas, depending on its properties. - - :param replica_or_instance_id_filter: Id of the stateful service replica or stateless service - instance that matches the filter. The filter is applied only to the specified replica, if it - exists. - If the replica doesn't exist, no replica is returned in the cluster health chunk based on this - filter. - If the replica exists, it is included in the cluster health chunk if it respects the other - filter properties. - If not specified, all replicas that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class ReplicaHealthStateFilter(Model): + """Defines matching criteria to determine whether a replica should be included + as a child of a partition in the cluster health chunk. + The replicas are only returned if the parent entities match a filter + specified in the cluster health chunk query description. The parent + partition, service and application must be included in the cluster health + chunk. + One filter can match zero, one or multiple replicas, depending on its + properties. + + :param replica_or_instance_id_filter: Id of the stateful service replica + or stateless service instance that matches the filter. The filter is + applied only to the specified replica, if it exists. + If the replica doesn't exist, no replica is returned in the cluster health + chunk based on this filter. + If the replica exists, it is included in the cluster health chunk if it + respects the other filter properties. + If not specified, all replicas that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. :type replica_or_instance_id_filter: str - :param health_state_filter: The filter for the health state of the replicas. 
It allows - selecting replicas if they match the desired health states. - The possible values are integer value of one of the following health states. Only replicas - that match the filter are returned. All replicas are used to evaluate the parent partition - aggregated health state. - If not specified, default value is None, unless the replica ID is specified. If the filter has - default value and replica ID is specified, the matching replica is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches replicas with HealthState value of OK (2) - and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + replicas. It allows selecting replicas if they match the desired health + states. + The possible values are integer value of one of the following health + states. Only replicas that match the filter are returned. All replicas are + used to evaluate the parent partition aggregated health state. + If not specified, default value is None, unless the replica ID is + specified. If the filter has default value and replica ID is specified, + the matching replica is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. 
+ For example, if the provided value is 6, it matches replicas with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int """ @@ -20673,42 +17309,38 @@ class ReplicaHealthStateFilter(msrest.serialization.Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__( - self, - *, - replica_or_instance_id_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - **kwargs - ): + def __init__(self, *, replica_or_instance_id_filter: str=None, health_state_filter: int=0, **kwargs) -> None: super(ReplicaHealthStateFilter, self).__init__(**kwargs) self.replica_or_instance_id_filter = replica_or_instance_id_filter self.health_state_filter = health_state_filter -class ReplicaInfo(msrest.serialization.Model): - """Information about the identity, status, health, node name, uptime, and other details about the replica. +class ReplicaInfo(Model): + """Information about the identity, status, health, node name, uptime, and + other details about the replica. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo. + sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. 
The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. :type last_in_build_duration_in_seconds: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -20716,45 +17348,37 @@ class ReplicaInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'} } - def __init__( - self, - *, - replica_status: Optional[Union[str, "ReplicaStatus"]] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - address: Optional[str] = None, - last_in_build_duration_in_seconds: Optional[str] = None, - **kwargs - ): + def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, **kwargs) -> None: super(ReplicaInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.replica_status = replica_status self.health_state = health_state self.node_name = node_name self.address = address self.last_in_build_duration_in_seconds = last_in_build_duration_in_seconds + self.service_kind = None -class ReplicaLifecycleDescription(msrest.serialization.Model): +class ReplicaLifecycleDescription(Model): """Describes how the replica will behave. - :param is_singleton_replica_move_allowed_during_upgrade: If set to true, replicas with a target - replica set size of 1 will be permitted to move during upgrade. + :param is_singleton_replica_move_allowed_during_upgrade: If set to true, + replicas with a target replica set size of 1 will be permitted to move + during upgrade. 
:type is_singleton_replica_move_allowed_during_upgrade: bool - :param restore_replica_location_after_upgrade: If set to true, move/swap replica to original - location after upgrade. + :param restore_replica_location_after_upgrade: If set to true, move/swap + replica to original location after upgrade. :type restore_replica_location_after_upgrade: bool """ @@ -20763,26 +17387,22 @@ class ReplicaLifecycleDescription(msrest.serialization.Model): 'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'}, } - def __init__( - self, - *, - is_singleton_replica_move_allowed_during_upgrade: Optional[bool] = None, - restore_replica_location_after_upgrade: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, is_singleton_replica_move_allowed_during_upgrade: bool=None, restore_replica_location_after_upgrade: bool=None, **kwargs) -> None: super(ReplicaLifecycleDescription, self).__init__(**kwargs) self.is_singleton_replica_move_allowed_during_upgrade = is_singleton_replica_move_allowed_during_upgrade self.restore_replica_location_after_upgrade = restore_replica_location_after_upgrade -class ReplicaMetricLoadDescription(msrest.serialization.Model): - """Specifies metric loads of a partition's specific secondary replica or instance. +class ReplicaMetricLoadDescription(Model): + """Specifies metric loads of a partition's specific secondary replica or + instance. :param node_name: Node name of a specific secondary replica or instance. :type node_name: str - :param replica_or_instance_load_entries: Loads of a different metrics for a partition's - secondary replica or instance. - :type replica_or_instance_load_entries: list[~azure.servicefabric.models.MetricLoadDescription] + :param replica_or_instance_load_entries: Loads of a different metrics for + a partition's secondary replica or instance. 
+ :type replica_or_instance_load_entries: + list[~azure.servicefabric.models.MetricLoadDescription] """ _attribute_map = { @@ -20790,48 +17410,43 @@ class ReplicaMetricLoadDescription(msrest.serialization.Model): 'replica_or_instance_load_entries': {'key': 'ReplicaOrInstanceLoadEntries', 'type': '[MetricLoadDescription]'}, } - def __init__( - self, - *, - node_name: Optional[str] = None, - replica_or_instance_load_entries: Optional[List["MetricLoadDescription"]] = None, - **kwargs - ): + def __init__(self, *, node_name: str=None, replica_or_instance_load_entries=None, **kwargs) -> None: super(ReplicaMetricLoadDescription, self).__init__(**kwargs) self.node_name = node_name self.replica_or_instance_load_entries = replica_or_instance_load_entries class ReplicasHealthEvaluation(HealthEvaluation): - """Represents health evaluation for replicas, containing health evaluations for each unhealthy replica that impacted current aggregated health state. Can be returned when evaluating partition health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for replicas, containing health evaluations + for each unhealthy replica that impacted current aggregated health state. + Can be returned when evaluating partition health and the aggregated health + state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param max_percent_unhealthy_replicas_per_partition: Maximum allowed percentage of unhealthy - replicas per partition from the ApplicationHealthPolicy. + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_replicas_per_partition: Maximum allowed + percentage of unhealthy replicas per partition from the + ApplicationHealthPolicy. :type max_percent_unhealthy_replicas_per_partition: int - :param total_count: Total number of replicas in the partition from the health store. + :param total_count: Total number of replicas in the partition from the + health store. 
:type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ReplicaHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ReplicaHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -20839,60 +17454,58 @@ class ReplicasHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - max_percent_unhealthy_replicas_per_partition: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_replicas_per_partition: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(ReplicasHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Replicas' # type: str self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition 
self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Replicas' -class ReplicatorQueueStatus(msrest.serialization.Model): - """Provides various statistics of the queue used in the service fabric replicator. -Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. -Depending on the role of the replicator, the properties in this type imply different meanings. +class ReplicatorQueueStatus(Model): + """Provides various statistics of the queue used in the service fabric + replicator. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. + Depending on the role of the replicator, the properties in this type imply + different meanings. - :param queue_utilization_percentage: Represents the utilization of the queue. A value of 0 - indicates that the queue is empty and a value of 100 indicates the queue is full. + :param queue_utilization_percentage: Represents the utilization of the + queue. A value of 0 indicates that the queue is empty and a value of 100 + indicates the queue is full. :type queue_utilization_percentage: int - :param queue_memory_size: Represents the virtual memory consumed by the queue in bytes. + :param queue_memory_size: Represents the virtual memory consumed by the + queue in bytes. :type queue_memory_size: str - :param first_sequence_number: On a primary replicator, this is semantically the sequence number - of the operation for which all the secondary replicas have sent an acknowledgement. - On a secondary replicator, this is the smallest sequence number of the operation that is - present in the queue. + :param first_sequence_number: On a primary replicator, this is + semantically the sequence number of the operation for which all the + secondary replicas have sent an acknowledgement. 
+ On a secondary replicator, this is the smallest sequence number of the + operation that is present in the queue. :type first_sequence_number: str - :param completed_sequence_number: On a primary replicator, this is semantically the highest - sequence number of the operation for which all the secondary replicas have sent an - acknowledgement. - On a secondary replicator, this is semantically the highest sequence number that has been - applied to the persistent state. + :param completed_sequence_number: On a primary replicator, this is + semantically the highest sequence number of the operation for which all + the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is semantically the highest sequence + number that has been applied to the persistent state. :type completed_sequence_number: str - :param committed_sequence_number: On a primary replicator, this is semantically the highest - sequence number of the operation for which a write quorum of the secondary replicas have sent - an acknowledgement. - On a secondary replicator, this is semantically the highest sequence number of the in-order - operation received from the primary. + :param committed_sequence_number: On a primary replicator, this is + semantically the highest sequence number of the operation for which a + write quorum of the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is semantically the highest sequence + number of the in-order operation received from the primary. :type committed_sequence_number: str - :param last_sequence_number: Represents the latest sequence number of the operation that is - available in the queue. + :param last_sequence_number: Represents the latest sequence number of the + operation that is available in the queue. 
:type last_sequence_number: str """ @@ -20905,17 +17518,7 @@ class ReplicatorQueueStatus(msrest.serialization.Model): 'last_sequence_number': {'key': 'LastSequenceNumber', 'type': 'str'}, } - def __init__( - self, - *, - queue_utilization_percentage: Optional[int] = None, - queue_memory_size: Optional[str] = None, - first_sequence_number: Optional[str] = None, - completed_sequence_number: Optional[str] = None, - committed_sequence_number: Optional[str] = None, - last_sequence_number: Optional[str] = None, - **kwargs - ): + def __init__(self, *, queue_utilization_percentage: int=None, queue_memory_size: str=None, first_sequence_number: str=None, completed_sequence_number: str=None, committed_sequence_number: str=None, last_sequence_number: str=None, **kwargs) -> None: super(ReplicatorQueueStatus, self).__init__(**kwargs) self.queue_utilization_percentage = queue_utilization_percentage self.queue_memory_size = queue_memory_size @@ -20925,14 +17528,16 @@ def __init__( self.last_sequence_number = last_sequence_number -class ResolvedServiceEndpoint(msrest.serialization.Model): +class ResolvedServiceEndpoint(Model): """Endpoint of a resolved service partition. - :param kind: The role of the replica where the endpoint is reported. Possible values include: - "Invalid", "Stateless", "StatefulPrimary", "StatefulSecondary". + :param kind: The role of the replica where the endpoint is reported. + Possible values include: 'Invalid', 'Stateless', 'StatefulPrimary', + 'StatefulSecondary' :type kind: str or ~azure.servicefabric.models.ServiceEndpointRole - :param address: The address of the endpoint. If the endpoint has multiple listeners the address - is a JSON object with one property per listener with the value as the address of that listener. + :param address: The address of the endpoint. If the endpoint has multiple + listeners the address is a JSON object with one property per listener with + the value as the address of that listener. 
:type address: str """ @@ -20941,32 +17546,30 @@ class ResolvedServiceEndpoint(msrest.serialization.Model): 'address': {'key': 'Address', 'type': 'str'}, } - def __init__( - self, - *, - kind: Optional[Union[str, "ServiceEndpointRole"]] = None, - address: Optional[str] = None, - **kwargs - ): + def __init__(self, *, kind=None, address: str=None, **kwargs) -> None: super(ResolvedServiceEndpoint, self).__init__(**kwargs) self.kind = kind self.address = address -class ResolvedServicePartition(msrest.serialization.Model): +class ResolvedServicePartition(Model): """Information about a service partition and its associated endpoints. All required parameters must be populated in order to send to Azure. - :param name: Required. The full name of the service with 'fabric:' URI scheme. + :param name: Required. The full name of the service with 'fabric:' URI + scheme. :type name: str - :param partition_information: Required. A representation of the resolved partition. - :type partition_information: ~azure.servicefabric.models.PartitionInformation - :param endpoints: Required. List of resolved service endpoints of a service partition. + :param partition_information: Required. A representation of the resolved + partition. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param endpoints: Required. List of resolved service endpoints of a + service partition. :type endpoints: list[~azure.servicefabric.models.ResolvedServiceEndpoint] - :param version: Required. The version of this resolved service partition result. This version - should be passed in the next time the ResolveService call is made via the PreviousRspVersion - query parameter. + :param version: Required. The version of this resolved service partition + result. This version should be passed in the next time the ResolveService + call is made via the PreviousRspVersion query parameter. 
:type version: str """ @@ -20984,15 +17587,7 @@ class ResolvedServicePartition(msrest.serialization.Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - partition_information: "PartitionInformation", - endpoints: List["ResolvedServiceEndpoint"], - version: str, - **kwargs - ): + def __init__(self, *, name: str, partition_information, endpoints, version: str, **kwargs) -> None: super(ResolvedServicePartition, self).__init__(**kwargs) self.name = name self.partition_information = partition_information @@ -21000,12 +17595,15 @@ def __init__( self.version = version -class ResourceLimits(msrest.serialization.Model): - """This type describes the resource limits for a given container. It describes the most amount of resources a container is allowed to use before being restarted. +class ResourceLimits(Model): + """This type describes the resource limits for a given container. It describes + the most amount of resources a container is allowed to use before being + restarted. :param memory_in_gb: The memory limit in GB. :type memory_in_gb: float - :param cpu: CPU limits in cores. At present, only full cores are supported. + :param cpu: CPU limits in cores. At present, only full cores are + supported. :type cpu: float """ @@ -21014,26 +17612,26 @@ class ResourceLimits(msrest.serialization.Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__( - self, - *, - memory_in_gb: Optional[float] = None, - cpu: Optional[float] = None, - **kwargs - ): + def __init__(self, *, memory_in_gb: float=None, cpu: float=None, **kwargs) -> None: super(ResourceLimits, self).__init__(**kwargs) self.memory_in_gb = memory_in_gb self.cpu = cpu -class ResourceRequests(msrest.serialization.Model): - """This type describes the requested resources for a given container. It describes the least amount of resources required for the container. A container can consume more than requested resources up to the specified limits before being restarted. 
Currently, the requested resources are treated as limits. +class ResourceRequests(Model): + """This type describes the requested resources for a given container. It + describes the least amount of resources required for the container. A + container can consume more than requested resources up to the specified + limits before being restarted. Currently, the requested resources are + treated as limits. All required parameters must be populated in order to send to Azure. - :param memory_in_gb: Required. The memory request in GB for this container. + :param memory_in_gb: Required. The memory request in GB for this + container. :type memory_in_gb: float - :param cpu: Required. Requested number of CPU cores. At present, only full cores are supported. + :param cpu: Required. Requested number of CPU cores. At present, only full + cores are supported. :type cpu: float """ @@ -21047,26 +17645,22 @@ class ResourceRequests(msrest.serialization.Model): 'cpu': {'key': 'cpu', 'type': 'float'}, } - def __init__( - self, - *, - memory_in_gb: float, - cpu: float, - **kwargs - ): + def __init__(self, *, memory_in_gb: float, cpu: float, **kwargs) -> None: super(ResourceRequests, self).__init__(**kwargs) self.memory_in_gb = memory_in_gb self.cpu = cpu -class ResourceRequirements(msrest.serialization.Model): +class ResourceRequirements(Model): """This type describes the resource requirements for a container or a service. All required parameters must be populated in order to send to Azure. - :param requests: Required. Describes the requested resources for a given container. + :param requests: Required. Describes the requested resources for a given + container. :type requests: ~azure.servicefabric.models.ResourceRequests - :param limits: Describes the maximum limits on the resources for a given container. + :param limits: Describes the maximum limits on the resources for a given + container. 
:type limits: ~azure.servicefabric.models.ResourceLimits """ @@ -21079,45 +17673,41 @@ class ResourceRequirements(msrest.serialization.Model): 'limits': {'key': 'limits', 'type': 'ResourceLimits'}, } - def __init__( - self, - *, - requests: "ResourceRequests", - limits: Optional["ResourceLimits"] = None, - **kwargs - ): + def __init__(self, *, requests, limits=None, **kwargs) -> None: super(ResourceRequirements, self).__init__(**kwargs) self.requests = requests self.limits = limits -class RestartDeployedCodePackageDescription(msrest.serialization.Model): - """Defines description for restarting a deployed code package on Service Fabric node. +class RestartDeployedCodePackageDescription(Model): + """Defines description for restarting a deployed code package on Service + Fabric node. All required parameters must be populated in order to send to Azure. - :param service_manifest_name: Required. The name of service manifest that specified this code - package. + :param service_manifest_name: Required. The name of service manifest that + specified this code package. :type service_manifest_name: str - :param service_package_activation_id: The ActivationId of a deployed service package. If - ServicePackageActivationMode specified at the time of creating the service - is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), - then value of ServicePackageActivationId + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param code_package_name: Required. The name of the code package defined in the service - manifest. + :param code_package_name: Required. The name of the code package defined + in the service manifest. 
:type code_package_name: str - :param code_package_instance_id: Required. The instance ID for currently running entry point. - For a code package setup entry point (if specified) runs first and after it finishes main entry - point is started. - Each time entry point executable is run, its instance ID will change. If 0 is passed in as the - code package instance ID, the API will restart the code package with whatever instance ID it is - currently running. - If an instance ID other than 0 is passed in, the API will restart the code package only if the - current Instance ID matches the passed in instance ID. - Note, passing in the exact instance ID (not 0) in the API is safer, because if ensures at most - one restart of the code package. + :param code_package_instance_id: Required. The instance ID for currently + running entry point. For a code package setup entry point (if specified) + runs first and after it finishes main entry point is started. + Each time entry point executable is run, its instance ID will change. If 0 + is passed in as the code package instance ID, the API will restart the + code package with whatever instance ID it is currently running. + If an instance ID other than 0 is passed in, the API will restart the code + package only if the current Instance ID matches the passed in instance ID. + Note, passing in the exact instance ID (not 0) in the API is safer, + because it ensures at most one restart of the code package.
:type code_package_instance_id: str """ @@ -21134,15 +17724,7 @@ class RestartDeployedCodePackageDescription(msrest.serialization.Model): 'code_package_instance_id': {'key': 'CodePackageInstanceId', 'type': 'str'}, } - def __init__( - self, - *, - service_manifest_name: str, - code_package_name: str, - code_package_instance_id: str, - service_package_activation_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_manifest_name: str, code_package_name: str, code_package_instance_id: str, service_package_activation_id: str=None, **kwargs) -> None: super(RestartDeployedCodePackageDescription, self).__init__(**kwargs) self.service_manifest_name = service_manifest_name self.service_package_activation_id = service_package_activation_id @@ -21150,19 +17732,22 @@ def __init__( self.code_package_instance_id = code_package_instance_id -class RestartNodeDescription(msrest.serialization.Model): +class RestartNodeDescription(Model): """Describes the parameters to restart a Service Fabric node. All required parameters must be populated in order to send to Azure. - :param node_instance_id: Required. The instance ID of the target node. If instance ID is - specified the node is restarted only if it matches with the current instance of the node. A - default value of "0" would match any instance ID. The instance ID can be obtained using get - node query. + :param node_instance_id: Required. The instance ID of the target node. If + instance ID is specified the node is restarted only if it matches with the + current instance of the node. A default value of "0" would match any + instance ID. The instance ID can be obtained using get node query. Default + value: "0" . :type node_instance_id: str - :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is - case-sensitive. Possible values include: "False", "True". Default value: "False". 
- :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump + :param create_fabric_dump: Specify True to create a dump of the fabric + node process. This is case-sensitive. Possible values include: 'False', + 'True'. Default value: "False" . + :type create_fabric_dump: str or + ~azure.servicefabric.models.CreateFabricDump """ _validation = { @@ -21174,26 +17759,21 @@ class RestartNodeDescription(msrest.serialization.Model): 'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'}, } - def __init__( - self, - *, - node_instance_id: str = "0", - create_fabric_dump: Optional[Union[str, "CreateFabricDump"]] = "False", - **kwargs - ): + def __init__(self, *, node_instance_id: str="0", create_fabric_dump="False", **kwargs) -> None: super(RestartNodeDescription, self).__init__(**kwargs) self.node_instance_id = node_instance_id self.create_fabric_dump = create_fabric_dump -class RestartPartitionResult(msrest.serialization.Model): - """Represents information about an operation in a terminal state (Completed or Faulted). +class RestartPartitionResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). - :param error_code: If OperationState is Completed, this is 0. If OperationState is Faulted, - this is an error code indicating the reason. + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. :type error_code: int - :param selected_partition: This class returns information about the partition that the - user-induced operation acted upon. + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. 
:type selected_partition: ~azure.servicefabric.models.SelectedPartition """ @@ -21202,29 +17782,25 @@ class RestartPartitionResult(msrest.serialization.Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__( - self, - *, - error_code: Optional[int] = None, - selected_partition: Optional["SelectedPartition"] = None, - **kwargs - ): + def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: super(RestartPartitionResult, self).__init__(**kwargs) self.error_code = error_code self.selected_partition = selected_partition -class RestorePartitionDescription(msrest.serialization.Model): - """Specifies the parameters needed to trigger a restore of a specific partition. +class RestorePartitionDescription(Model): + """Specifies the parameters needed to trigger a restore of a specific + partition. All required parameters must be populated in order to send to Azure. :param backup_id: Required. Unique backup ID. :type backup_id: str - :param backup_location: Required. Location of the backup relative to the backup storage - specified/ configured. + :param backup_location: Required. Location of the backup relative to the + backup storage specified/ configured. :type backup_location: str - :param backup_storage: Location of the backup from where the partition will be restored. + :param backup_storage: Location of the backup from where the partition + will be restored. 
:type backup_storage: ~azure.servicefabric.models.BackupStorageDescription """ @@ -21239,33 +17815,29 @@ class RestorePartitionDescription(msrest.serialization.Model): 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, } - def __init__( - self, - *, - backup_id: str, - backup_location: str, - backup_storage: Optional["BackupStorageDescription"] = None, - **kwargs - ): + def __init__(self, *, backup_id: str, backup_location: str, backup_storage=None, **kwargs) -> None: super(RestorePartitionDescription, self).__init__(**kwargs) self.backup_id = backup_id self.backup_location = backup_location self.backup_storage = backup_storage -class RestoreProgressInfo(msrest.serialization.Model): +class RestoreProgressInfo(Model): """Describes the progress of a restore operation on a partition. - :param restore_state: Represents the current state of the partition restore operation. Possible - values include: "Invalid", "Accepted", "RestoreInProgress", "Success", "Failure", "Timeout". + :param restore_state: Represents the current state of the partition + restore operation. Possible values include: 'Invalid', 'Accepted', + 'RestoreInProgress', 'Success', 'Failure', 'Timeout' :type restore_state: str or ~azure.servicefabric.models.RestoreState :param time_stamp_utc: Timestamp when operation succeeded or failed. - :type time_stamp_utc: ~datetime.datetime - :param restored_epoch: Describes the epoch at which the partition is restored. + :type time_stamp_utc: datetime + :param restored_epoch: Describes the epoch at which the partition is + restored. :type restored_epoch: ~azure.servicefabric.models.Epoch :param restored_lsn: Restored LSN. :type restored_lsn: str - :param failure_error: Denotes the failure encountered in performing restore operation. + :param failure_error: Denotes the failure encountered in performing + restore operation. 
:type failure_error: ~azure.servicefabric.models.FabricErrorError """ @@ -21277,16 +17849,7 @@ class RestoreProgressInfo(msrest.serialization.Model): 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, } - def __init__( - self, - *, - restore_state: Optional[Union[str, "RestoreState"]] = None, - time_stamp_utc: Optional[datetime.datetime] = None, - restored_epoch: Optional["Epoch"] = None, - restored_lsn: Optional[str] = None, - failure_error: Optional["FabricErrorError"] = None, - **kwargs - ): + def __init__(self, *, restore_state=None, time_stamp_utc=None, restored_epoch=None, restored_lsn: str=None, failure_error=None, **kwargs) -> None: super(RestoreProgressInfo, self).__init__(**kwargs) self.restore_state = restore_state self.time_stamp_utc = time_stamp_utc @@ -21295,13 +17858,14 @@ def __init__( self.failure_error = failure_error -class ResumeApplicationUpgradeDescription(msrest.serialization.Model): - """Describes the parameters for resuming an unmonitored manual Service Fabric application upgrade. +class ResumeApplicationUpgradeDescription(Model): + """Describes the parameters for resuming an unmonitored manual Service Fabric + application upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain_name: Required. The name of the upgrade domain in which to resume the - upgrade. + :param upgrade_domain_name: Required. The name of the upgrade domain in + which to resume the upgrade. 
:type upgrade_domain_name: str """ @@ -21313,22 +17877,18 @@ class ResumeApplicationUpgradeDescription(msrest.serialization.Model): 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, } - def __init__( - self, - *, - upgrade_domain_name: str, - **kwargs - ): + def __init__(self, *, upgrade_domain_name: str, **kwargs) -> None: super(ResumeApplicationUpgradeDescription, self).__init__(**kwargs) self.upgrade_domain_name = upgrade_domain_name -class ResumeClusterUpgradeDescription(msrest.serialization.Model): +class ResumeClusterUpgradeDescription(Model): """Describes the parameters for resuming a cluster upgrade. All required parameters must be populated in order to send to Azure. - :param upgrade_domain: Required. The next upgrade domain for this cluster upgrade. + :param upgrade_domain: Required. The next upgrade domain for this cluster + upgrade. :type upgrade_domain: str """ @@ -21340,76 +17900,83 @@ class ResumeClusterUpgradeDescription(msrest.serialization.Model): 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, } - def __init__( - self, - *, - upgrade_domain: str, - **kwargs - ): + def __init__(self, *, upgrade_domain: str, **kwargs) -> None: super(ResumeClusterUpgradeDescription, self).__init__(**kwargs) self.upgrade_domain = upgrade_domain -class RollingUpgradeUpdateDescription(msrest.serialization.Model): - """Describes the parameters for updating a rolling upgrade of application or cluster. +class RollingUpgradeUpdateDescription(Model): + """Describes the parameters for updating a rolling upgrade of application or + cluster. All required parameters must be populated in order to send to Azure. - :param rolling_upgrade_mode: Required. The mode used to monitor health during a rolling - upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values - include: "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: Required. 
The mode used to monitor health + during a rolling upgrade. The values are UnmonitoredAuto, + UnmonitoredManual, and Monitored. Possible values include: 'Invalid', + 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: + "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param replica_set_check_timeout_in_milliseconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param replica_set_check_timeout_in_milliseconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type replica_set_check_timeout_in_milliseconds: long - :param failure_action: The compensating action to perform when a Monitored upgrade encounters - monitoring policy or health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will - start rolling back automatically. 
- Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. Possible - values include: "Invalid", "Rollback", "Manual". + :param failure_action: The compensating action to perform when a Monitored + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction - :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing - an upgrade domain before applying health policies. It is first interpreted as a string - representing an ISO 8601 duration. If that fails, then it is interpreted as a number - representing the total number of milliseconds. + :param health_check_wait_duration_in_milliseconds: The amount of time to + wait after completing an upgrade domain before applying health policies. + It is first interpreted as a string representing an ISO 8601 duration. If + that fails, then it is interpreted as a number representing the total + number of milliseconds. :type health_check_wait_duration_in_milliseconds: str - :param health_check_stable_duration_in_milliseconds: The amount of time that the application or - cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first - interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_stable_duration_in_milliseconds: The amount of time + that the application or cluster must remain healthy before the upgrade + proceeds to the next upgrade domain. It is first interpreted as a string + representing an ISO 8601 duration. 
If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str - :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health - evaluation when the application or cluster is unhealthy before FailureAction is executed. It is - first interpreted as a string representing an ISO 8601 duration. If that fails, then it is - interpreted as a number representing the total number of milliseconds. + :param health_check_retry_timeout_in_milliseconds: The amount of time to + retry health evaluation when the application or cluster is unhealthy + before FailureAction is executed. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str - :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete - before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 - duration. If that fails, then it is interpreted as a number representing the total number of + :param upgrade_timeout_in_milliseconds: The amount of time the overall + upgrade has to complete before FailureAction is executed. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, + then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str - :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to - complete before FailureAction is executed. It is first interpreted as a string representing an - ISO 8601 duration. If that fails, then it is interpreted as a number representing the total - number of milliseconds. 
+ :param upgrade_domain_timeout_in_milliseconds: The amount of time each + upgrade domain has to complete before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of + milliseconds. :type upgrade_domain_timeout_in_milliseconds: str - :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a - stateless instance is closed, to allow the active requests to drain gracefully. This would be - effective when the instance is closing during the application/cluster - upgrade, only for those instances which have a non-zero delay duration configured in the - service description. See InstanceCloseDelayDurationSeconds property in $ref: + :param instance_close_delay_duration_in_seconds: Duration in seconds, to + wait before a stateless instance is closed, to allow the active requests + to drain gracefully. This would be effective when the instance is closing + during the application/cluster + upgrade, only for those instances which have a non-zero delay duration + configured in the service description. See + InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates - that the behavior will entirely depend on the delay configured in the stateless service - description. + Note, the default value of InstanceCloseDelayDurationInSeconds is + 4294967295, which indicates that the behavior will entirely depend on the + delay configured in the stateless service description. 
:type instance_close_delay_duration_in_seconds: long """ @@ -21430,21 +17997,7 @@ class RollingUpgradeUpdateDescription(msrest.serialization.Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__( - self, - *, - rolling_upgrade_mode: Union[str, "UpgradeMode"] = "UnmonitoredAuto", - force_restart: Optional[bool] = False, - replica_set_check_timeout_in_milliseconds: Optional[int] = 42949672925, - failure_action: Optional[Union[str, "FailureAction"]] = None, - health_check_wait_duration_in_milliseconds: Optional[str] = "0", - health_check_stable_duration_in_milliseconds: Optional[str] = "PT0H2M0S", - health_check_retry_timeout_in_milliseconds: Optional[str] = "PT0H10M0S", - upgrade_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", - upgrade_domain_timeout_in_milliseconds: Optional[str] = "P10675199DT02H48M05.4775807S", - instance_close_delay_duration_in_seconds: Optional[int] = 4294967295, - **kwargs - ): + def __init__(self, *, rolling_upgrade_mode="UnmonitoredAuto", force_restart: bool=None, replica_set_check_timeout_in_milliseconds: int=None, failure_action=None, health_check_wait_duration_in_milliseconds: str=None, health_check_stable_duration_in_milliseconds: str=None, health_check_retry_timeout_in_milliseconds: str=None, upgrade_timeout_in_milliseconds: str=None, upgrade_domain_timeout_in_milliseconds: str=None, instance_close_delay_duration_in_seconds: int=None, **kwargs) -> None: super(RollingUpgradeUpdateDescription, self).__init__(**kwargs) self.rolling_upgrade_mode = rolling_upgrade_mode self.force_restart = force_restart @@ -21459,15 +18012,19 @@ def __init__( class RunToCompletionExecutionPolicy(ExecutionPolicy): - """The run to completion execution policy, the service will perform its desired operation and complete successfully. If the service encounters failure, it will restarted based on restart policy specified. 
If the service completes its operation successfully, it will not be restarted again. + """The run to completion execution policy, the service will perform its + desired operation and complete successfully. If the service encounters + failure, it will be restarted based on restart policy specified. If the + service completes its operation successfully, it will not be restarted + again. All required parameters must be populated in order to send to Azure. - :param type: Required. Enumerates the execution policy types for services.Constant filled by - server. Possible values include: "Default", "RunToCompletion". - :type type: str or ~azure.servicefabric.models.ExecutionPolicyType - :param restart: Required. Enumerates the restart policy for RunToCompletionExecutionPolicy. - Possible values include: "OnFailure", "Never". + :param type: Required. Constant filled by server. + :type type: str + :param restart: Required. Enumerates the restart policy for + RunToCompletionExecutionPolicy. Possible values include: 'OnFailure', + 'Never' :type restart: str or ~azure.servicefabric.models.RestartPolicy """ _validation = { @@ -21481,23 +18038,20 @@ class RunToCompletionExecutionPolicy(ExecutionPolicy): 'restart': {'key': 'restart', 'type': 'str'}, } - def __init__( - self, - *, - restart: Union[str, "RestartPolicy"], - **kwargs - ): + def __init__(self, *, restart, **kwargs) -> None: super(RunToCompletionExecutionPolicy, self).__init__(**kwargs) - self.type = 'RunToCompletion' # type: str self.restart = restart + self.type = 'RunToCompletion' -class SafetyCheckWrapper(msrest.serialization.Model): - """A wrapper for the safety check object. Safety checks are performed by service fabric before continuing with the operations. These checks ensure the availability of the service and the reliability of the state. +class SafetyCheckWrapper(Model): + """A wrapper for the safety check object. Safety checks are performed by + service fabric before continuing with the operations.
These checks ensure + the availability of the service and the reliability of the state. - :param safety_check: Represents a safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. + :param safety_check: Represents a safety check performed by service fabric + before continuing with the operations. These checks ensure the + availability of the service and the reliability of the state. :type safety_check: ~azure.servicefabric.models.SafetyCheck """ @@ -21505,26 +18059,24 @@ class SafetyCheckWrapper(msrest.serialization.Model): 'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'}, } - def __init__( - self, - *, - safety_check: Optional["SafetyCheck"] = None, - **kwargs - ): + def __init__(self, *, safety_check=None, **kwargs) -> None: super(SafetyCheckWrapper, self).__init__(**kwargs) self.safety_check = safety_check -class ScalingPolicyDescription(msrest.serialization.Model): +class ScalingPolicyDescription(Model): """Describes how the scaling should be performed. All required parameters must be populated in order to send to Azure. - :param scaling_trigger: Required. Specifies the trigger associated with this scaling policy. - :type scaling_trigger: ~azure.servicefabric.models.ScalingTriggerDescription - :param scaling_mechanism: Required. Specifies the mechanism associated with this scaling - policy. - :type scaling_mechanism: ~azure.servicefabric.models.ScalingMechanismDescription + :param scaling_trigger: Required. Specifies the trigger associated with + this scaling policy + :type scaling_trigger: + ~azure.servicefabric.models.ScalingTriggerDescription + :param scaling_mechanism: Required. 
Specifies the mechanism associated + with this scaling policy + :type scaling_mechanism: + ~azure.servicefabric.models.ScalingMechanismDescription """ _validation = { @@ -21537,50 +18089,49 @@ class ScalingPolicyDescription(msrest.serialization.Model): 'scaling_mechanism': {'key': 'ScalingMechanism', 'type': 'ScalingMechanismDescription'}, } - def __init__( - self, - *, - scaling_trigger: "ScalingTriggerDescription", - scaling_mechanism: "ScalingMechanismDescription", - **kwargs - ): + def __init__(self, *, scaling_trigger, scaling_mechanism, **kwargs) -> None: super(ScalingPolicyDescription, self).__init__(**kwargs) self.scaling_trigger = scaling_trigger self.scaling_mechanism = scaling_mechanism class SecondaryReplicatorStatus(ReplicatorStatus): - """Provides statistics about the Service Fabric Replicator, when it is functioning in a ActiveSecondary role. + """Provides statistics about the Service Fabric Replicator, when it is + functioning in a ActiveSecondary role. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SecondaryActiveReplicatorStatus, SecondaryIdleReplicatorStatus. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the secondary - replicator. - :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a - replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation message was never - received. 
- :type last_replication_operation_received_time_utc: ~datetime.datetime - :param is_in_build: Value that indicates whether the replica is currently being built. + sub-classes are: SecondaryActiveReplicatorStatus, + SecondaryIdleReplicatorStatus + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary replicator. + :param copy_queue_status: Details about the copy queue on the secondary + replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy - operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation message was never - received. - :type last_copy_operation_received_time_utc: ~datetime.datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment - was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. - :type last_acknowledgement_sent_time_utc: ~datetime.datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. 
+ UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime """ _validation = { @@ -21601,56 +18152,50 @@ class SecondaryReplicatorStatus(ReplicatorStatus): 'kind': {'ActiveSecondary': 'SecondaryActiveReplicatorStatus', 'IdleSecondary': 'SecondaryIdleReplicatorStatus'} } - def __init__( - self, - *, - replication_queue_status: Optional["ReplicatorQueueStatus"] = None, - last_replication_operation_received_time_utc: Optional[datetime.datetime] = None, - is_in_build: Optional[bool] = None, - copy_queue_status: Optional["ReplicatorQueueStatus"] = None, - last_copy_operation_received_time_utc: Optional[datetime.datetime] = None, - last_acknowledgement_sent_time_utc: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: super(SecondaryReplicatorStatus, self).__init__(**kwargs) - self.kind = 'SecondaryReplicatorStatus' # type: str self.replication_queue_status = replication_queue_status self.last_replication_operation_received_time_utc = last_replication_operation_received_time_utc self.is_in_build = is_in_build self.copy_queue_status = copy_queue_status self.last_copy_operation_received_time_utc = last_copy_operation_received_time_utc self.last_acknowledgement_sent_time_utc = last_acknowledgement_sent_time_utc + self.kind = 'SecondaryReplicatorStatus' class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary 
replicator when it is in active mode and is part of the replica set. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". - :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the secondary - replicator. - :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a - replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation message was never - received. - :type last_replication_operation_received_time_utc: ~datetime.datetime - :param is_in_build: Value that indicates whether the replica is currently being built. + """Status of the secondary replicator when it is in active mode and is part of + the replica set. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary replicator. 
+ :param copy_queue_status: Details about the copy queue on the secondary + replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy - operation was received from the primary. - UTC 0 represents an invalid value, indicating that a copy operation message was never - received. - :type last_copy_operation_received_time_utc: ~datetime.datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment - was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. - :type last_acknowledgement_sent_time_utc: ~datetime.datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. 
+ :type last_acknowledgement_sent_time_utc: datetime """ _validation = { @@ -21667,50 +18212,44 @@ class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__( - self, - *, - replication_queue_status: Optional["ReplicatorQueueStatus"] = None, - last_replication_operation_received_time_utc: Optional[datetime.datetime] = None, - is_in_build: Optional[bool] = None, - copy_queue_status: Optional["ReplicatorQueueStatus"] = None, - last_copy_operation_received_time_utc: Optional[datetime.datetime] = None, - last_acknowledgement_sent_time_utc: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: super(SecondaryActiveReplicatorStatus, self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc, **kwargs) - self.kind = 'ActiveSecondary' # type: str + self.kind = 'ActiveSecondary' class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): - """Status of the secondary replicator when it is in idle mode and is being built by the primary. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The role of a replica of a stateful service.Constant filled by server. - Possible values include: "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". 
- :type kind: str or ~azure.servicefabric.models.ReplicaRole - :param replication_queue_status: Details about the replication queue on the secondary - replicator. - :type replication_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_replication_operation_received_time_utc: The last time-stamp (UTC) at which a - replication operation was received from the primary. - UTC 0 represents an invalid value, indicating that a replication operation message was never - received. - :type last_replication_operation_received_time_utc: ~datetime.datetime - :param is_in_build: Value that indicates whether the replica is currently being built. + """Status of the secondary replicator when it is in idle mode and is being + built by the primary. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. :type is_in_build: bool - :param copy_queue_status: Details about the copy queue on the secondary replicator. + :param copy_queue_status: Details about the copy queue on the secondary + replicator. :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus - :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at which a copy - operation was received from the primary. 
- UTC 0 represents an invalid value, indicating that a copy operation message was never - received. - :type last_copy_operation_received_time_utc: ~datetime.datetime - :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at which an acknowledgment - was sent to the primary replicator. - UTC 0 represents an invalid value, indicating that an acknowledgment message was never sent. - :type last_acknowledgement_sent_time_utc: ~datetime.datetime + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime """ _validation = { @@ -21727,27 +18266,18 @@ class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, } - def __init__( - self, - *, - replication_queue_status: Optional["ReplicatorQueueStatus"] = None, - last_replication_operation_received_time_utc: Optional[datetime.datetime] = None, - is_in_build: Optional[bool] = None, - copy_queue_status: Optional["ReplicatorQueueStatus"] = None, - last_copy_operation_received_time_utc: Optional[datetime.datetime] = None, - last_acknowledgement_sent_time_utc: Optional[datetime.datetime] = None, - **kwargs - ): + def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: super(SecondaryIdleReplicatorStatus, 
self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc, **kwargs) - self.kind = 'IdleSecondary' # type: str + self.kind = 'IdleSecondary' -class SecretResourceDescription(msrest.serialization.Model): +class SecretResourceDescription(Model): """This type describes a secret resource. All required parameters must be populated in order to send to Azure. - :param properties: Required. Describes the properties of a secret resource. + :param properties: Required. Describes the properties of a secret + resource. :type properties: ~azure.servicefabric.models.SecretResourceProperties :param name: Required. Name of the Secret resource. :type name: str @@ -21763,19 +18293,13 @@ class SecretResourceDescription(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, } - def __init__( - self, - *, - properties: "SecretResourceProperties", - name: str, - **kwargs - ): + def __init__(self, *, properties, name: str, **kwargs) -> None: super(SecretResourceDescription, self).__init__(**kwargs) self.properties = properties self.name = name -class SecretValue(msrest.serialization.Model): +class SecretValue(Model): """This type represents the unencrypted value of the secret. :param value: The actual value of the secret. 
@@ -21786,17 +18310,12 @@ class SecretValue(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - *, - value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, value: str=None, **kwargs) -> None: super(SecretValue, self).__init__(**kwargs) self.value = value -class SecretValueProperties(msrest.serialization.Model): +class SecretValueProperties(Model): """This type describes properties of secret value resource. :param value: The actual value of the secret. @@ -21807,18 +18326,14 @@ class SecretValueProperties(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - *, - value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, value: str=None, **kwargs) -> None: super(SecretValueProperties, self).__init__(**kwargs) self.value = value -class SecretValueResourceDescription(msrest.serialization.Model): - """This type describes a value of a secret resource. The name of this resource is the version identifier corresponding to this secret value. +class SecretValueResourceDescription(Model): + """This type describes a value of a secret resource. The name of this resource + is the version identifier corresponding to this secret value. All required parameters must be populated in order to send to Azure. @@ -21837,49 +18352,20 @@ class SecretValueResourceDescription(msrest.serialization.Model): 'value': {'key': 'properties.value', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str, value: str=None, **kwargs) -> None: super(SecretValueResourceDescription, self).__init__(**kwargs) self.name = name self.value = value -class SecretValueResourceProperties(SecretValueProperties): - """This type describes properties of a secret value resource. - - :param value: The actual value of the secret. 
- :type value: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[str] = None, - **kwargs - ): - super(SecretValueResourceProperties, self).__init__(value=value, **kwargs) - - class SeedNodeSafetyCheck(SafetyCheck): - """Represents a safety check for the seed nodes being performed by service fabric before continuing with node level operations. + """Represents a safety check for the seed nodes being performed by service + fabric before continuing with node level operations. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind + :param kind: Required. Constant filled by server. + :type kind: str """ _validation = { @@ -21890,23 +18376,22 @@ class SeedNodeSafetyCheck(SafetyCheck): 'kind': {'key': 'Kind', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(SeedNodeSafetyCheck, self).__init__(**kwargs) - self.kind = 'EnsureSeedNodeQuorum' # type: str + self.kind = 'EnsureSeedNodeQuorum' -class SelectedPartition(msrest.serialization.Model): - """This class returns information about the partition that the user-induced operation acted upon. +class SelectedPartition(Model): + """This class returns information about the partition that the user-induced + operation acted upon. :param service_name: The name of the service the partition belongs to. 
:type service_name: str - :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. - This is a randomly generated GUID when the service was created. The partition ID is unique and - does not change for the lifetime of the service. If the same service was deleted and recreated - the IDs of its partitions would be different. + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. :type partition_id: str """ @@ -21915,36 +18400,33 @@ class SelectedPartition(msrest.serialization.Model): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, partition_id: str=None, **kwargs) -> None: super(SelectedPartition, self).__init__(**kwargs) self.service_name = service_name self.partition_id = partition_id class ServiceBackupConfigurationInfo(BackupConfigurationInfo): - """Backup configuration information for a specific Service Fabric service specifying what backup policy is being applied and suspend description, if any. + """Backup configuration information for a specific Service Fabric service + specifying what backup policy is being applied and suspend description, if + any. All required parameters must be populated in order to send to Azure. - :param kind: Required. The entity type of a Service Fabric entity such as Application, Service - or a Partition where periodic backups can be enabled.Constant filled by server. Possible - values include: "Invalid", "Partition", "Service", "Application". 
- :type kind: str or ~azure.servicefabric.models.BackupEntityKind - :param policy_name: The name of the backup policy which is applicable to this Service Fabric - application or service or partition. + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. :type policy_name: str - :param policy_inherited_from: Specifies the scope at which the backup policy is applied. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type policy_inherited_from: str or ~azure.servicefabric.models.BackupPolicyScope + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope :param suspension_info: Describes the backup suspension details. :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. 
:type service_name: str """ @@ -21953,25 +18435,17 @@ class ServiceBackupConfigurationInfo(BackupConfigurationInfo): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'policy_name': {'key': 'PolicyName', 'type': 'str'}, 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - *, - policy_name: Optional[str] = None, - policy_inherited_from: Optional[Union[str, "BackupPolicyScope"]] = None, - suspension_info: Optional["BackupSuspensionInfo"] = None, - service_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, service_name: str=None, **kwargs) -> None: super(ServiceBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) - self.kind = 'Service' # type: str self.service_name = service_name + self.kind = 'Service' class ServiceBackupEntity(BackupEntity): @@ -21979,11 +18453,10 @@ class ServiceBackupEntity(BackupEntity): All required parameters must be populated in order to send to Azure. - :param entity_kind: Required. The entity type of a Service Fabric entity such as Application, - Service or a Partition where periodic backups can be enabled.Constant filled by server. - Possible values include: "Invalid", "Partition", "Service", "Application". - :type entity_kind: str or ~azure.servicefabric.models.BackupEntityKind - :param service_name: The full name of the service with 'fabric:' URI scheme. + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. 
:type service_name: str """ @@ -21996,28 +18469,24 @@ class ServiceBackupEntity(BackupEntity): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, **kwargs) -> None: super(ServiceBackupEntity, self).__init__(**kwargs) - self.entity_kind = 'Service' # type: str self.service_name = service_name + self.entity_kind = 'Service' -class ServiceCorrelationDescription(msrest.serialization.Model): +class ServiceCorrelationDescription(Model): """Creates a particular correlation between services. All required parameters must be populated in order to send to Azure. - :param scheme: Required. The ServiceCorrelationScheme which describes the relationship between - this service and the service specified via ServiceName. Possible values include: "Invalid", - "Affinity", "AlignedAffinity", "NonAlignedAffinity". + :param scheme: Required. The ServiceCorrelationScheme which describes the + relationship between this service and the service specified via + ServiceName. Possible values include: 'Invalid', 'Affinity', + 'AlignedAffinity', 'NonAlignedAffinity' :type scheme: str or ~azure.servicefabric.models.ServiceCorrelationScheme - :param service_name: Required. The name of the service that the correlation relationship is - established with. + :param service_name: Required. The name of the service that the + correlation relationship is established with. 
:type service_name: str """ @@ -22031,13 +18500,7 @@ class ServiceCorrelationDescription(msrest.serialization.Model): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - *, - scheme: Union[str, "ServiceCorrelationScheme"], - service_name: str, - **kwargs - ): + def __init__(self, *, scheme, service_name: str, **kwargs) -> None: super(ServiceCorrelationDescription, self).__init__(**kwargs) self.scheme = scheme self.service_name = service_name @@ -22047,84 +18510,57 @@ class ServiceEvent(FabricEvent): """Represents the base for all Service Events. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, ServiceHealthReportExpiredEvent, ServiceNewHealthReportEvent. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", 
"ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, + ServiceNewHealthReportEvent, ServiceHealthReportExpiredEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. 
For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, } _subtype_map = { - 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent'} - } - - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - service_id: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceNewHealthReport': 'ServiceNewHealthReportEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent'} + } + + def __init__(self, *, event_instance_id: 
str, time_stamp, service_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ServiceEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) - self.kind = 'ServiceEvent' # type: str self.service_id = service_id + self.kind = 'ServiceEvent' class ServiceCreatedEvent(ServiceEvent): @@ -22132,44 +18568,25 @@ class ServiceCreatedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", 
"ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param service_type_name: Required. Service type name. :type service_type_name: str @@ -22189,17 +18606,18 @@ class ServiceCreatedEvent(ServiceEvent): :type min_replica_set_size: int :param service_package_version: Required. Version of Service package. :type service_package_version: str - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
:type partition_id: str """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -22214,11 +18632,11 @@ class ServiceCreatedEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -22232,28 +18650,8 @@ class ServiceCreatedEvent(ServiceEvent): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - service_id: str, - service_type_name: str, - application_name: str, - application_type_name: str, - service_instance: int, - is_stateful: bool, - partition_count: int, - target_replica_set_size: int, - min_replica_set_size: int, - service_package_version: str, - partition_id: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, service_type_name: str, application_name: str, application_type_name: str, service_instance: int, is_stateful: bool, partition_count: int, target_replica_set_size: int, min_replica_set_size: int, service_package_version: str, partition_id: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ServiceCreatedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, 
has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) - self.kind = 'ServiceCreated' # type: str self.service_type_name = service_type_name self.application_name = application_name self.application_type_name = application_type_name @@ -22264,6 +18662,7 @@ def __init__( self.min_replica_set_size = min_replica_set_size self.service_package_version = service_package_version self.partition_id = partition_id + self.kind = 'ServiceCreated' class ServiceDeletedEvent(ServiceEvent): @@ -22271,44 +18670,25 @@ class ServiceDeletedEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", 
"StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param service_type_name: Required. Service type name. :type service_type_name: str @@ -22331,9 +18711,9 @@ class ServiceDeletedEvent(ServiceEvent): """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'service_type_name': {'required': True}, 'application_name': {'required': True}, @@ -22347,11 +18727,11 @@ class ServiceDeletedEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, @@ -22364,27 +18744,8 @@ class ServiceDeletedEvent(ServiceEvent): 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - service_id: str, - service_type_name: str, - application_name: str, - application_type_name: str, - service_instance: int, - is_stateful: bool, - partition_count: int, - target_replica_set_size: int, - min_replica_set_size: int, - service_package_version: str, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, service_type_name: str, application_name: str, application_type_name: str, service_instance: int, is_stateful: bool, 
partition_count: int, target_replica_set_size: int, min_replica_set_size: int, service_package_version: str, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ServiceDeletedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) - self.kind = 'ServiceDeleted' # type: str self.service_type_name = service_type_name self.application_name = application_name self.application_type_name = application_type_name @@ -22394,72 +18755,85 @@ def __init__( self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.service_package_version = service_package_version + self.kind = 'ServiceDeleted' -class ServiceDescription(msrest.serialization.Model): - """A ServiceDescription contains all of the information necessary to create a service. +class ServiceDescription(Model): + """A ServiceDescription contains all of the information necessary to create a + service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceDescription, StatelessServiceDescription. + sub-classes are: StatefulServiceDescription, StatelessServiceDescription All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. 
:type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. Initialization data - is passed to service instances or replicas when they are created. + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an object. - :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. 
- :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param tags_required_to_place: Tags for placement of this service. 
- :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_place: + ~azure.servicefabric.models.NodeTagsDescription :param tags_required_to_run: Tags for running of this service. - :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_run: + ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { - 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, + 'service_kind': {'required': True}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -22476,35 +18850,15 @@ class ServiceDescription(msrest.serialization.Model): 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceDescription', 'Stateless': 'StatelessServiceDescription'} } - def __init__( - self, - *, - service_name: str, - service_type_name: str, - partition_description: "PartitionSchemeDescription", - application_name: Optional[str] = None, - initialization_data: Optional[List[int]] = None, - placement_constraints: Optional[str] = None, - correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, - service_load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - 
default_move_cost: Optional[Union[str, "MoveCost"]] = None, - is_default_move_cost_specified: Optional[bool] = None, - service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, - service_dns_name: Optional[str] = None, - scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, - tags_required_to_place: Optional["NodeTagsDescription"] = None, - tags_required_to_run: Optional["NodeTagsDescription"] = None, - **kwargs - ): + def __init__(self, *, service_name: str, service_type_name: str, partition_description, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, tags_required_to_place=None, tags_required_to_run=None, **kwargs) -> None: super(ServiceDescription, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.application_name = application_name self.service_name = service_name self.service_type_name = service_type_name @@ -22521,29 +18875,34 @@ def __init__( self.scaling_policies = scaling_policies self.tags_required_to_place = tags_required_to_place self.tags_required_to_run = tags_required_to_run + self.service_kind = None -class ServiceFromTemplateDescription(msrest.serialization.Model): - """Defines description for creating a Service Fabric service from a template defined in the application manifest. +class ServiceFromTemplateDescription(Model): + """Defines description for creating a Service Fabric service from a template + defined in the application manifest. All required parameters must be populated in order to send to Azure. - :param application_name: Required. The name of the application, including the 'fabric:' URI - scheme. + :param application_name: Required. 
The name of the application, including + the 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data for the newly created service instance. + :param initialization_data: The initialization data for the newly created + service instance. :type initialization_data: list[int] - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. 
:type service_dns_name: str """ @@ -22562,17 +18921,7 @@ class ServiceFromTemplateDescription(msrest.serialization.Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, } - def __init__( - self, - *, - application_name: str, - service_name: str, - service_type_name: str, - initialization_data: Optional[List[int]] = None, - service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, - service_dns_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, application_name: str, service_name: str, service_type_name: str, initialization_data=None, service_package_activation_mode=None, service_dns_name: str=None, **kwargs) -> None: super(ServiceFromTemplateDescription, self).__init__(**kwargs) self.application_name = application_name self.service_name = service_name @@ -22585,26 +18934,30 @@ def __init__( class ServiceHealth(EntityHealth): """Information about the health of a Service Fabric service. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
:type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param name: The name of the service whose health information is described by this object. + :param name: The name of the service whose health information is described + by this object. :type name: str - :param partition_health_states: The list of partition health states associated with the - service. - :type partition_health_states: list[~azure.servicefabric.models.PartitionHealthState] + :param partition_health_states: The list of partition health states + associated with the service. 
+ :type partition_health_states: + list[~azure.servicefabric.models.PartitionHealthState] """ _attribute_map = { @@ -22616,50 +18969,40 @@ class ServiceHealth(EntityHealth): 'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - name: Optional[str] = None, - partition_health_states: Optional[List["PartitionHealthState"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, partition_health_states=None, **kwargs) -> None: super(ServiceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) self.name = name self.partition_health_states = partition_health_states class ServiceHealthEvaluation(HealthEvaluation): - """Represents health evaluation for a service, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for a service, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param service_name: Name of the service whose health evaluation is described by this object. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: Name of the service whose health evaluation is + described by this object. 
:type service_name: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the service. The types of the unhealthy evaluations can be - PartitionsHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the service. The types of the + unhealthy evaluations can be PartitionsHealthEvaluation or + EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -22667,26 +19010,18 @@ class ServiceHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - service_name: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, service_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(ServiceHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Service' # type: str self.service_name = service_name self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Service' class ServiceHealthReportExpiredEvent(ServiceEvent): @@ -22694,44 +19029,25 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): All required parameters must be 
populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. 
The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type service_id: str :param instance_id: Required. Id of Service instance. :type instance_id: long @@ -22747,16 +19063,17 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. 
:type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -22770,11 +19087,11 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -22787,27 +19104,8 @@ class ServiceHealthReportExpiredEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - service_id: str, - instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ServiceHealthReportExpiredEvent, 
self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) - self.kind = 'ServiceHealthReportExpired' # type: str self.instance_id = instance_id self.source_id = source_id self.property = property @@ -22817,16 +19115,21 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ServiceHealthReportExpired' class ServiceHealthState(EntityHealthState): - """Represents the health state of a service, which contains the service identifier and its aggregated health state. - - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_name: Name of the service whose health state is represented by this object. + """Represents the health state of a service, which contains the service + identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param service_name: Name of the service whose health state is represented + by this object. 
:type service_name: str """ @@ -22835,30 +19138,28 @@ class ServiceHealthState(EntityHealthState): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - service_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, service_name: str=None, **kwargs) -> None: super(ServiceHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.service_name = service_name class ServiceHealthStateChunk(EntityHealthStateChunk): - """Represents the health state chunk of a service, which contains the service name, its aggregated health state and any partitions that respect the filters in the cluster health chunk query description. + """Represents the health state chunk of a service, which contains the service + name, its aggregated health state and any partitions that respect the + filters in the cluster health chunk query description. - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_name: The name of the service whose health state chunk is provided in this - object. + :param service_name: The name of the service whose health state chunk is + provided in this object. :type service_name: str - :param partition_health_state_chunks: The list of partition health state chunks belonging to - the service that respect the filters in the cluster health chunk query description. 
- :type partition_health_state_chunks: ~azure.servicefabric.models.PartitionHealthStateChunkList + :param partition_health_state_chunks: The list of partition health state + chunks belonging to the service that respect the filters in the cluster + health chunk query description. + :type partition_health_state_chunks: + ~azure.servicefabric.models.PartitionHealthStateChunkList """ _attribute_map = { @@ -22867,24 +19168,18 @@ class ServiceHealthStateChunk(EntityHealthStateChunk): 'partition_health_state_chunks': {'key': 'PartitionHealthStateChunks', 'type': 'PartitionHealthStateChunkList'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - service_name: Optional[str] = None, - partition_health_state_chunks: Optional["PartitionHealthStateChunkList"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, service_name: str=None, partition_health_state_chunks=None, **kwargs) -> None: super(ServiceHealthStateChunk, self).__init__(health_state=health_state, **kwargs) self.service_name = service_name self.partition_health_state_chunks = partition_health_state_chunks -class ServiceHealthStateChunkList(msrest.serialization.Model): - """The list of service health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. +class ServiceHealthStateChunkList(Model): + """The list of service health state chunks that respect the input filters in + the chunk query. Returned by get cluster health state chunks query. - :param items: The list of service health state chunks that respect the input filters in the - chunk query. + :param items: The list of service health state chunks that respect the + input filters in the chunk query. 
:type items: list[~azure.servicefabric.models.ServiceHealthStateChunk] """ @@ -22892,60 +19187,67 @@ class ServiceHealthStateChunkList(msrest.serialization.Model): 'items': {'key': 'Items', 'type': '[ServiceHealthStateChunk]'}, } - def __init__( - self, - *, - items: Optional[List["ServiceHealthStateChunk"]] = None, - **kwargs - ): + def __init__(self, *, items=None, **kwargs) -> None: super(ServiceHealthStateChunkList, self).__init__(**kwargs) self.items = items -class ServiceHealthStateFilter(msrest.serialization.Model): - """Defines matching criteria to determine whether a service should be included as a child of an application in the cluster health chunk. -The services are only returned if the parent application matches a filter specified in the cluster health chunk query description. -One filter can match zero, one or multiple services, depending on its properties. - - :param service_name_filter: The name of the service that matches the filter. The filter is - applied only to the specified service, if it exists. - If the service doesn't exist, no service is returned in the cluster health chunk based on this - filter. - If the service exists, it is included as the application's child if the health state matches - the other filter properties. - If not specified, all services that match the parent filters (if any) are taken into - consideration and matched against the other filter members, like health state filter. +class ServiceHealthStateFilter(Model): + """Defines matching criteria to determine whether a service should be included + as a child of an application in the cluster health chunk. + The services are only returned if the parent application matches a filter + specified in the cluster health chunk query description. + One filter can match zero, one or multiple services, depending on its + properties. + + :param service_name_filter: The name of the service that matches the + filter. The filter is applied only to the specified service, if it exists. 
+ If the service doesn't exist, no service is returned in the cluster health + chunk based on this filter. + If the service exists, it is included as the application's child if the + health state matches the other filter properties. + If not specified, all services that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. :type service_name_filter: str - :param health_state_filter: The filter for the health state of the services. It allows - selecting services if they match the desired health states. - The possible values are integer value of one of the following health states. Only services - that match the filter are returned. All services are used to evaluate the cluster aggregated - health state. - If not specified, default value is None, unless the service name is specified. If the filter - has default value and service name is specified, the matching service is returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6, it matches services with HealthState value of OK (2) - and Warning (4). - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. + :param health_state_filter: The filter for the health state of the + services. It allows selecting services if they match the desired health + states. 
+ The possible values are integer value of one of the following health + states. Only services that match the filter are returned. All services are + used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the service name is + specified. If the filter has default value and service name is specified, + the matching service is returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches services with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . :type health_state_filter: int - :param partition_filters: Defines a list of filters that specify which partitions to be - included in the returned cluster health chunk as children of the service. The partitions are - returned only if the parent service matches a filter. - If the list is empty, no partitions are returned. All the partitions are used to evaluate the - parent service aggregated health state, regardless of the input filters. + :param partition_filters: Defines a list of filters that specify which + partitions to be included in the returned cluster health chunk as children + of the service. The partitions are returned only if the parent service + matches a filter. + If the list is empty, no partitions are returned. 
All the partitions are + used to evaluate the parent service aggregated health state, regardless of + the input filters. The service filter may specify multiple partition filters. - For example, it can specify a filter to return all partitions with health state Error and - another filter to always include a partition identified by its partition ID. - :type partition_filters: list[~azure.servicefabric.models.PartitionHealthStateFilter] + For example, it can specify a filter to return all partitions with health + state Error and another filter to always include a partition identified by + its partition ID. + :type partition_filters: + list[~azure.servicefabric.models.PartitionHealthStateFilter] """ _attribute_map = { @@ -22954,21 +19256,14 @@ class ServiceHealthStateFilter(msrest.serialization.Model): 'partition_filters': {'key': 'PartitionFilters', 'type': '[PartitionHealthStateFilter]'}, } - def __init__( - self, - *, - service_name_filter: Optional[str] = None, - health_state_filter: Optional[int] = 0, - partition_filters: Optional[List["PartitionHealthStateFilter"]] = None, - **kwargs - ): + def __init__(self, *, service_name_filter: str=None, health_state_filter: int=0, partition_filters=None, **kwargs) -> None: super(ServiceHealthStateFilter, self).__init__(**kwargs) self.service_name_filter = service_name_filter self.health_state_filter = health_state_filter self.partition_filters = partition_filters -class ServiceIdentity(msrest.serialization.Model): +class ServiceIdentity(Model): """Map service identity friendly name to an application identity. :param name: The identity friendly name. 
@@ -22982,51 +19277,47 @@ class ServiceIdentity(msrest.serialization.Model): 'identity_ref': {'key': 'identityRef', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - identity_ref: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str=None, identity_ref: str=None, **kwargs) -> None: super(ServiceIdentity, self).__init__(**kwargs) self.name = name self.identity_ref = identity_ref -class ServiceInfo(msrest.serialization.Model): +class ServiceInfo(Model): """Information about a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceInfo, StatelessServiceInfo. + sub-classes are: StatefulServiceInfo, StatelessServiceInfo All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str - :param type_name: Name of the service type as specified in the service manifest. + :param type_name: Name of the service type as specified in the service + manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values include: "Unknown", - "Active", "Upgrading", "Deleting", "Creating", "Failed". + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -23035,62 +19326,55 @@ class ServiceInfo(msrest.serialization.Model): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceInfo', 'Stateless': 'StatelessServiceInfo'} } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - type_name: Optional[str] = None, - manifest_version: Optional[str] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - service_status: Optional[Union[str, "ServiceStatus"]] = None, - is_service_group: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, **kwargs) -> None: super(ServiceInfo, self).__init__(**kwargs) self.id = id - self.service_kind = None # type: Optional[str] self.name = name self.type_name = type_name self.manifest_version = manifest_version self.health_state = health_state self.service_status = service_status self.is_service_group = is_service_group + self.service_kind = None -class ServiceLoadMetricDescription(msrest.serialization.Model): +class ServiceLoadMetricDescription(Model): """Specifies a metric to load balance a service during runtime. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the metric. 
If the service chooses to report load during - runtime, the load metric name should match the name that is specified in Name exactly. Note - that metric names are case-sensitive. + :param name: Required. The name of the metric. If the service chooses to + report load during runtime, the load metric name should match the name + that is specified in Name exactly. Note that metric names are + case-sensitive. :type name: str - :param weight: The service load metric relative weight, compared to other metrics configured - for this service, as a number. Possible values include: "Zero", "Low", "Medium", "High". + :param weight: The service load metric relative weight, compared to other + metrics configured for this service, as a number. Possible values include: + 'Zero', 'Low', 'Medium', 'High' :type weight: str or ~azure.servicefabric.models.ServiceLoadMetricWeight - :param primary_default_load: Used only for Stateful services. The default amount of load, as a - number, that this service creates for this metric when it is a Primary replica. + :param primary_default_load: Used only for Stateful services. The default + amount of load, as a number, that this service creates for this metric + when it is a Primary replica. :type primary_default_load: int - :param secondary_default_load: Used only for Stateful services. The default amount of load, as - a number, that this service creates for this metric when it is a Secondary replica. + :param secondary_default_load: Used only for Stateful services. The + default amount of load, as a number, that this service creates for this + metric when it is a Secondary replica. :type secondary_default_load: int - :param default_load: Used only for Stateless services. The default amount of load, as a number, - that this service creates for this metric. + :param default_load: Used only for Stateless services. The default amount + of load, as a number, that this service creates for this metric. 
:type default_load: int """ @@ -23106,16 +19390,7 @@ class ServiceLoadMetricDescription(msrest.serialization.Model): 'default_load': {'key': 'DefaultLoad', 'type': 'int'}, } - def __init__( - self, - *, - name: str, - weight: Optional[Union[str, "ServiceLoadMetricWeight"]] = None, - primary_default_load: Optional[int] = None, - secondary_default_load: Optional[int] = None, - default_load: Optional[int] = None, - **kwargs - ): + def __init__(self, *, name: str, weight=None, primary_default_load: int=None, secondary_default_load: int=None, default_load: int=None, **kwargs) -> None: super(ServiceLoadMetricDescription, self).__init__(**kwargs) self.name = name self.weight = weight @@ -23124,15 +19399,16 @@ def __init__( self.default_load = default_load -class ServiceNameInfo(msrest.serialization.Model): +class ServiceNameInfo(Model): """Information about the service name. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str :param name: The full name of the service with 'fabric:' URI scheme. 
:type name: str @@ -23143,13 +19419,7 @@ class ServiceNameInfo(msrest.serialization.Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: super(ServiceNameInfo, self).__init__(**kwargs) self.id = id self.name = name @@ -23160,44 +19430,25 @@ class ServiceNewHealthReportEvent(ServiceEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", 
"ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param service_id: Required. The identity of the service. This ID is an encoded representation - of the service name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This ID is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. 
:type service_id: str :param instance_id: Required. Id of Service instance. :type instance_id: long @@ -23213,16 +19464,17 @@ class ServiceNewHealthReportEvent(ServiceEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'service_id': {'required': True}, 'instance_id': {'required': True}, 'source_id': {'required': True}, @@ -23236,11 +19488,11 @@ class ServiceNewHealthReportEvent(ServiceEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_id': {'key': 'ServiceId', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -23253,27 +19505,8 @@ class ServiceNewHealthReportEvent(ServiceEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - service_id: str, - instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, 
- **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(ServiceNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) - self.kind = 'ServiceNewHealthReport' # type: str self.instance_id = instance_id self.source_id = source_id self.property = property @@ -23283,29 +19516,33 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ServiceNewHealthReport' -class ServicePartitionInfo(msrest.serialization.Model): +class ServicePartitionInfo(Model): """Information about a partition of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServicePartitionInfo, StatelessServicePartitionInfo. + sub-classes are: StatefulServicePartitionInfo, + StatelessServicePartitionInfo All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service partition. Possible values - include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". - :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, partitioning scheme and - keys supported by it. - :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -23313,44 +19550,39 @@ class ServicePartitionInfo(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServicePartitionInfo', 'Stateless': 'StatelessServicePartitionInfo'} } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - partition_status: Optional[Union[str, "ServicePartitionStatus"]] = None, - partition_information: Optional["PartitionInformation"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, partition_status=None, partition_information=None, **kwargs) -> None: super(ServicePartitionInfo, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.health_state = health_state self.partition_status = partition_status self.partition_information = partition_information + self.service_kind = None -class ServicePlacementPolicyDescription(msrest.serialization.Model): +class ServicePlacementPolicyDescription(Model): """Describes the policy to be used for placement of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, ServicePlacementInvalidDomainPolicyDescription, ServicePlacementNonPartiallyPlaceServicePolicyDescription, ServicePlacementPreferPrimaryDomainPolicyDescription, ServicePlacementRequiredDomainPolicyDescription, ServicePlacementRequireDomainDistributionPolicyDescription. 
+ sub-classes are: ServicePlacementInvalidDomainPolicyDescription, + ServicePlacementNonPartiallyPlaceServicePolicyDescription, + ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, + ServicePlacementPreferPrimaryDomainPolicyDescription, + ServicePlacementRequiredDomainPolicyDescription, + ServicePlacementRequireDomainDistributionPolicyDescription All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param type: Required. Constant filled by server. + :type type: str """ _validation = { @@ -23362,29 +19594,26 @@ class ServicePlacementPolicyDescription(msrest.serialization.Model): } _subtype_map = { - 'type': {'AllowMultipleStatelessInstancesOnNode': 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} + 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'AllowMultipleStatelessInstancesOnNode': 'ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 
'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ServicePlacementPolicyDescription, self).__init__(**kwargs) - self.type = None # type: Optional[str] + self.type = None class ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service allowing multiple stateless instances of a partition of the service to be placed on a node. + """Describes the policy to be used for placement of a Service Fabric service + allowing multiple stateless instances of a partition of the service to be + placed on a node. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: Holdover from other policy descriptions, not used for this policy, values - are ignored by runtime. Keeping it for any backwards-compatibility with clients. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: Holdover from other policy descriptions, not used for + this policy, values are ignored by runtime. Keeping it for any + backwards-compatibility with clients. 
:type domain_name: str """ @@ -23397,28 +19626,23 @@ class ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription(Ser 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, **kwargs) -> None: super(ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, self).__init__(**kwargs) - self.type = 'AllowMultipleStatelessInstancesOnNode' # type: str self.domain_name = domain_name + self.type = 'AllowMultipleStatelessInstancesOnNode' class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where a particular fault or upgrade domain should not be used for placement of the instances or replicas of that service. + """Describes the policy to be used for placement of a Service Fabric service + where a particular fault or upgrade domain should not be used for placement + of the instances or replicas of that service. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should not be used for placement. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should not be used for + placement. 
:type domain_name: str """ @@ -23431,27 +19655,21 @@ class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescr 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, **kwargs) -> None: super(ServicePlacementInvalidDomainPolicyDescription, self).__init__(**kwargs) - self.type = 'InvalidDomain' # type: str self.domain_name = domain_name + self.type = 'InvalidDomain' class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where all replicas must be able to be placed in order for any replicas to be created. + """Describes the policy to be used for placement of a Service Fabric service + where all replicas must be able to be placed in order for any replicas to + be created. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType + :param type: Required. Constant filled by server. 
+ :type type: str """ _validation = { @@ -23462,27 +19680,29 @@ class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacement 'type': {'key': 'Type', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__(**kwargs) - self.type = 'NonPartiallyPlaceService' # type: str + self.type = 'NonPartiallyPlaceService' class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where the service's Primary replicas should optimally be placed in a particular domain. - -This placement policy is usually used with fault domains in scenarios where the Service Fabric cluster is geographically distributed in order to indicate that a service's primary replica should be located in a particular fault domain, which in geo-distributed scenarios usually aligns with regional or datacenter boundaries. Note that since this is an optimization it is possible that the Primary replica may not end up located in this domain due to failures, capacity limits, or other constraints. + """Describes the policy to be used for placement of a Service Fabric service + where the service's Primary replicas should optimally be placed in a + particular domain. + This placement policy is usually used with fault domains in scenarios where + the Service Fabric cluster is geographically distributed in order to + indicate that a service's primary replica should be located in a particular + fault domain, which in geo-distributed scenarios usually aligns with + regional or datacenter boundaries. Note that since this is an optimization + it is possible that the Primary replica may not end up located in this + domain due to failures, capacity limits, or other constraints. All required parameters must be populated in order to send to Azure. - :param type: Required. 
The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should used for placement as per this policy. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. :type domain_name: str """ @@ -23495,28 +19715,23 @@ class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolic 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, **kwargs) -> None: super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__(**kwargs) - self.type = 'PreferPrimaryDomain' # type: str self.domain_name = domain_name + self.type = 'PreferPrimaryDomain' class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where the instances or replicas of that service must be placed in a particular domain. + """Describes the policy to be used for placement of a Service Fabric service + where the instances or replicas of that service must be placed in a + particular domain. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. 
Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should used for placement as per this policy. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. :type domain_name: str """ @@ -23529,30 +19744,31 @@ class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDesc 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, **kwargs) -> None: super(ServicePlacementRequiredDomainPolicyDescription, self).__init__(**kwargs) - self.type = 'RequireDomain' # type: str self.domain_name = domain_name + self.type = 'RequireDomain' class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacementPolicyDescription): - """Describes the policy to be used for placement of a Service Fabric service where two replicas from the same partition should never be placed in the same fault or upgrade domain. - -While this is not common it can expose the service to an increased risk of concurrent failures due to unplanned outages or other cases of subsequent/concurrent failures. As an example, consider a case where replicas are deployed across different data center, with one replica per location. In the event that one of the datacenters goes offline, normally the replica that was placed in that datacenter will be packed into one of the remaining datacenters. If this is not desirable then this policy should be set. 
+ """Describes the policy to be used for placement of a Service Fabric service + where two replicas from the same partition should never be placed in the + same fault or upgrade domain. + While this is not common it can expose the service to an increased risk of + concurrent failures due to unplanned outages or other cases of + subsequent/concurrent failures. As an example, consider a case where + replicas are deployed across different data center, with one replica per + location. In the event that one of the datacenters goes offline, normally + the replica that was placed in that datacenter will be packed into one of + the remaining datacenters. If this is not desirable then this policy should + be set. All required parameters must be populated in order to send to Azure. - :param type: Required. The type of placement policy for a service fabric service. Following are - the possible values.Constant filled by server. Possible values include: "Invalid", - "InvalidDomain", "RequireDomain", "PreferPrimaryDomain", "RequireDomainDistribution", - "NonPartiallyPlaceService", "AllowMultipleStatelessInstancesOnNode". - :type type: str or ~azure.servicefabric.models.ServicePlacementPolicyType - :param domain_name: The name of the domain that should used for placement as per this policy. + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. 
:type domain_name: str """ @@ -23565,42 +19781,40 @@ class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacemen 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__( - self, - *, - domain_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, domain_name: str=None, **kwargs) -> None: super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__(**kwargs) - self.type = 'RequireDomainDistribution' # type: str self.domain_name = domain_name + self.type = 'RequireDomainDistribution' -class ServiceProperties(msrest.serialization.Model): +class ServiceProperties(Model): """Describes properties of a service resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. Defaults to 1 if not - specified. + :param replica_count: The number of replicas of the service to create. + Defaults to 1 if not specified. :type replica_count: int - :param execution_policy: The execution policy of the service. + :param execution_policy: The execution policy of the service :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies. - :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :param auto_scaling_policies: Auto scaling policies + :type auto_scaling_policies: + list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. 
Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the service. + :ivar status_details: Gives additional information about the current + status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :ivar health_state: Describes the health state of an application resource. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the service is marked - unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', + this additional details from service fabric Health Manager for the user to + know why the service is marked unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. 
:type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -23628,17 +19842,7 @@ class ServiceProperties(msrest.serialization.Model): 'dns_name': {'key': 'dnsName', 'type': 'str'}, } - def __init__( - self, - *, - description: Optional[str] = None, - replica_count: Optional[int] = None, - execution_policy: Optional["ExecutionPolicy"] = None, - auto_scaling_policies: Optional[List["AutoScalingPolicy"]] = None, - identity_refs: Optional[List["ServiceIdentity"]] = None, - dns_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, description: str=None, replica_count: int=None, execution_policy=None, auto_scaling_policies=None, identity_refs=None, dns_name: str=None, **kwargs) -> None: super(ServiceProperties, self).__init__(**kwargs) self.description = description self.replica_count = replica_count @@ -23652,19 +19856,22 @@ def __init__( self.dns_name = dns_name -class ServiceReplicaProperties(msrest.serialization.Model): +class ServiceReplicaProperties(Model): """Describes the properties of a service replica. All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". + :param os_type: Required. The operation system required by the code in + service. Possible values include: 'Linux', 'Windows' :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. + :param code_packages: Required. 
Describes the set of code packages that + forms the service. A code package describes the container and the + properties for running it. All the code packages are started together on + the same host and share the same context (network, process etc.). + :type code_packages: + list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service + needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -23682,15 +19889,7 @@ class ServiceReplicaProperties(msrest.serialization.Model): 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, } - def __init__( - self, - *, - os_type: Union[str, "OperatingSystemType"], - code_packages: List["ContainerCodePackageProperties"], - network_refs: Optional[List["NetworkRef"]] = None, - diagnostics: Optional["DiagnosticsRef"] = None, - **kwargs - ): + def __init__(self, *, os_type, code_packages, network_refs=None, diagnostics=None, **kwargs) -> None: super(ServiceReplicaProperties, self).__init__(**kwargs) self.os_type = os_type self.code_packages = code_packages @@ -23703,14 +19902,17 @@ class ServiceReplicaDescription(ServiceReplicaProperties): All required parameters must be populated in order to send to Azure. - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". + :param os_type: Required. The operation system required by the code in + service. Possible values include: 'Linux', 'Windows' :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. 
All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. + :param code_packages: Required. Describes the set of code packages that + forms the service. A code package describes the container and the + properties for running it. All the code packages are started together on + the same host and share the same context (network, process etc.). + :type code_packages: + list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service + needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef @@ -23732,60 +19934,57 @@ class ServiceReplicaDescription(ServiceReplicaProperties): 'replica_name': {'key': 'replicaName', 'type': 'str'}, } - def __init__( - self, - *, - os_type: Union[str, "OperatingSystemType"], - code_packages: List["ContainerCodePackageProperties"], - replica_name: str, - network_refs: Optional[List["NetworkRef"]] = None, - diagnostics: Optional["DiagnosticsRef"] = None, - **kwargs - ): + def __init__(self, *, os_type, code_packages, replica_name: str, network_refs=None, diagnostics=None, **kwargs) -> None: super(ServiceReplicaDescription, self).__init__(os_type=os_type, code_packages=code_packages, network_refs=network_refs, diagnostics=diagnostics, **kwargs) self.replica_name = replica_name -class ServiceResourceDescription(msrest.serialization.Model): +class ServiceResourceDescription(Model): """This type describes a service resource. - Variables are only populated by the server, and will be ignored when sending a request. 
+ Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the Service resource. :type name: str - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". + :param os_type: Required. The operation system required by the code in + service. Possible values include: 'Linux', 'Windows' :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. + :param code_packages: Required. Describes the set of code packages that + forms the service. A code package describes the container and the + properties for running it. All the code packages are started together on + the same host and share the same context (network, process etc.). + :type code_packages: + list[~azure.servicefabric.models.ContainerCodePackageProperties] + :param network_refs: The names of the private networks that this service + needs to be part of. :type network_refs: list[~azure.servicefabric.models.NetworkRef] :param diagnostics: Reference to sinks in DiagnosticsDescription. :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef :param description: User readable description of the service. :type description: str - :param replica_count: The number of replicas of the service to create. Defaults to 1 if not - specified. + :param replica_count: The number of replicas of the service to create. + Defaults to 1 if not specified. 
:type replica_count: int - :param execution_policy: The execution policy of the service. + :param execution_policy: The execution policy of the service :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies. - :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :param auto_scaling_policies: Auto scaling policies + :type auto_scaling_policies: + list[~azure.servicefabric.models.AutoScalingPolicy] + :ivar status: Status of the service. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the service. + :ivar status_details: Gives additional information about the current + status of the service. :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". + :ivar health_state: Describes the health state of an application resource. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the service is marked - unhealthy. + :ivar unhealthy_evaluation: When the service's health state is not 'Ok', + this additional details from service fabric Health Manager for the user to + know why the service is marked unhealthy. :vartype unhealthy_evaluation: str :param identity_refs: The service identity list. 
:type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] @@ -23821,22 +20020,7 @@ class ServiceResourceDescription(msrest.serialization.Model): 'dns_name': {'key': 'properties.dnsName', 'type': 'str'}, } - def __init__( - self, - *, - name: str, - os_type: Union[str, "OperatingSystemType"], - code_packages: List["ContainerCodePackageProperties"], - network_refs: Optional[List["NetworkRef"]] = None, - diagnostics: Optional["DiagnosticsRef"] = None, - description: Optional[str] = None, - replica_count: Optional[int] = None, - execution_policy: Optional["ExecutionPolicy"] = None, - auto_scaling_policies: Optional[List["AutoScalingPolicy"]] = None, - identity_refs: Optional[List["ServiceIdentity"]] = None, - dns_name: Optional[str] = None, - **kwargs - ): + def __init__(self, *, name: str, os_type, code_packages, network_refs=None, diagnostics=None, description: str=None, replica_count: int=None, execution_policy=None, auto_scaling_policies=None, identity_refs=None, dns_name: str=None, **kwargs) -> None: super(ServiceResourceDescription, self).__init__(**kwargs) self.name = name self.os_type = os_type @@ -23855,142 +20039,39 @@ def __init__( self.dns_name = dns_name -class ServiceResourceProperties(ServiceReplicaProperties, ServiceProperties): - """This type describes properties of a service resource. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :param description: User readable description of the service. - :type description: str - :param replica_count: The number of replicas of the service to create. Defaults to 1 if not - specified. - :type replica_count: int - :param execution_policy: The execution policy of the service. - :type execution_policy: ~azure.servicefabric.models.ExecutionPolicy - :param auto_scaling_policies: Auto scaling policies. 
- :type auto_scaling_policies: list[~azure.servicefabric.models.AutoScalingPolicy] - :ivar status: Status of the service. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". - :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the service. - :vartype status_details: str - :ivar health_state: Describes the health state of an application resource. Possible values - include: "Invalid", "Ok", "Warning", "Error", "Unknown". - :vartype health_state: str or ~azure.servicefabric.models.HealthState - :ivar unhealthy_evaluation: When the service's health state is not 'Ok', this additional - details from service fabric Health Manager for the user to know why the service is marked - unhealthy. - :vartype unhealthy_evaluation: str - :param identity_refs: The service identity list. - :type identity_refs: list[~azure.servicefabric.models.ServiceIdentity] - :param dns_name: Dns name of the service. - :type dns_name: str - :param os_type: Required. The operation system required by the code in service. Possible values - include: "Linux", "Windows". - :type os_type: str or ~azure.servicefabric.models.OperatingSystemType - :param code_packages: Required. Describes the set of code packages that forms the service. A - code package describes the container and the properties for running it. All the code packages - are started together on the same host and share the same context (network, process etc.). - :type code_packages: list[~azure.servicefabric.models.ContainerCodePackageProperties] - :param network_refs: The names of the private networks that this service needs to be part of. - :type network_refs: list[~azure.servicefabric.models.NetworkRef] - :param diagnostics: Reference to sinks in DiagnosticsDescription. 
- :type diagnostics: ~azure.servicefabric.models.DiagnosticsRef - """ - - _validation = { - 'status': {'readonly': True}, - 'status_details': {'readonly': True}, - 'health_state': {'readonly': True}, - 'unhealthy_evaluation': {'readonly': True}, - 'os_type': {'required': True}, - 'code_packages': {'required': True}, - } - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'replica_count': {'key': 'replicaCount', 'type': 'int'}, - 'execution_policy': {'key': 'executionPolicy', 'type': 'ExecutionPolicy'}, - 'auto_scaling_policies': {'key': 'autoScalingPolicies', 'type': '[AutoScalingPolicy]'}, - 'status': {'key': 'status', 'type': 'str'}, - 'status_details': {'key': 'statusDetails', 'type': 'str'}, - 'health_state': {'key': 'healthState', 'type': 'str'}, - 'unhealthy_evaluation': {'key': 'unhealthyEvaluation', 'type': 'str'}, - 'identity_refs': {'key': 'identityRefs', 'type': '[ServiceIdentity]'}, - 'dns_name': {'key': 'dnsName', 'type': 'str'}, - 'os_type': {'key': 'osType', 'type': 'str'}, - 'code_packages': {'key': 'codePackages', 'type': '[ContainerCodePackageProperties]'}, - 'network_refs': {'key': 'networkRefs', 'type': '[NetworkRef]'}, - 'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsRef'}, - } - - def __init__( - self, - *, - os_type: Union[str, "OperatingSystemType"], - code_packages: List["ContainerCodePackageProperties"], - description: Optional[str] = None, - replica_count: Optional[int] = None, - execution_policy: Optional["ExecutionPolicy"] = None, - auto_scaling_policies: Optional[List["AutoScalingPolicy"]] = None, - identity_refs: Optional[List["ServiceIdentity"]] = None, - dns_name: Optional[str] = None, - network_refs: Optional[List["NetworkRef"]] = None, - diagnostics: Optional["DiagnosticsRef"] = None, - **kwargs - ): - super(ServiceResourceProperties, self).__init__(os_type=os_type, code_packages=code_packages, network_refs=network_refs, diagnostics=diagnostics, description=description, 
replica_count=replica_count, execution_policy=execution_policy, auto_scaling_policies=auto_scaling_policies, identity_refs=identity_refs, dns_name=dns_name, **kwargs) - self.description = description - self.replica_count = replica_count - self.execution_policy = execution_policy - self.auto_scaling_policies = auto_scaling_policies - self.status = None - self.status_details = None - self.health_state = None - self.unhealthy_evaluation = None - self.identity_refs = identity_refs - self.dns_name = dns_name - self.os_type = os_type - self.code_packages = code_packages - self.network_refs = network_refs - self.diagnostics = diagnostics - - class ServicesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for services of a certain service type belonging to an application, containing health evaluations for each unhealthy service that impacted current aggregated health state. Can be returned when evaluating application health and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". 
- :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for services of a certain service type + belonging to an application, containing health evaluations for each + unhealthy service that impacted current aggregated health state. Can be + returned when evaluating application health and the aggregated health state + is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str + :param kind: Required. Constant filled by server. + :type kind: str :param service_type_name: Name of the service type of the services. :type service_type_name: str - :param max_percent_unhealthy_services: Maximum allowed percentage of unhealthy services from - the ServiceTypeHealthPolicy. + :param max_percent_unhealthy_services: Maximum allowed percentage of + unhealthy services from the ServiceTypeHealthPolicy. :type max_percent_unhealthy_services: int - :param total_count: Total number of services of the current service type in the application - from the health store. 
+ :param total_count: Total number of services of the current service type + in the application from the health store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy ServiceHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ServiceHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -23998,62 +20079,57 @@ class ServicesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - service_type_name: Optional[str] = None, - max_percent_unhealthy_services: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, service_type_name: str=None, max_percent_unhealthy_services: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(ServicesHealthEvaluation, 
self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'Services' # type: str self.service_type_name = service_type_name self.max_percent_unhealthy_services = max_percent_unhealthy_services self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Services' -class ServiceTypeDescription(msrest.serialization.Model): - """Describes a service type defined in the service manifest of a provisioned application type. The properties the ones defined in the service manifest. +class ServiceTypeDescription(Model): + """Describes a service type defined in the service manifest of a provisioned + application type. The properties the ones defined in the service manifest. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceTypeDescription, StatelessServiceTypeDescription. + sub-classes are: StatefulServiceTypeDescription, + StatelessServiceTypeDescription All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. - Possible values include: "Invalid", "Stateless", "Stateful". - :type kind: str or ~azure.servicefabric.models.ServiceKind - :param is_stateful: Indicates whether the service type is a stateful service type or a - stateless service type. This property is true if the service type is a stateful service type, - false otherwise. + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. 
:type service_type_name: str - :param placement_constraints: The placement constraint to be used when instantiating this - service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy descriptions. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. 
+ :type kind: str """ _validation = { @@ -24061,41 +20137,31 @@ class ServiceTypeDescription(msrest.serialization.Model): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, } _subtype_map = { 'kind': {'Stateful': 'StatefulServiceTypeDescription', 'Stateless': 'StatelessServiceTypeDescription'} } - def __init__( - self, - *, - is_stateful: Optional[bool] = None, - service_type_name: Optional[str] = None, - placement_constraints: Optional[str] = None, - load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - extensions: Optional[List["ServiceTypeExtensionDescription"]] = None, - **kwargs - ): + def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, **kwargs) -> None: super(ServiceTypeDescription, self).__init__(**kwargs) - self.kind = None # type: Optional[str] self.is_stateful = is_stateful self.service_type_name = service_type_name self.placement_constraints = placement_constraints self.load_metrics = load_metrics self.service_placement_policies = service_placement_policies self.extensions = extensions + self.kind = None -class ServiceTypeExtensionDescription(msrest.serialization.Model): +class ServiceTypeExtensionDescription(Model): """Describes extension of a service type defined in the service manifest. 
:param key: The name of the extension. @@ -24109,56 +20175,51 @@ class ServiceTypeExtensionDescription(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__( - self, - *, - key: Optional[str] = None, - value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: super(ServiceTypeExtensionDescription, self).__init__(**kwargs) self.key = key self.value = value -class ServiceTypeHealthPolicy(msrest.serialization.Model): - """Represents the health policy used to evaluate the health of services belonging to a service type. - - :param max_percent_unhealthy_partitions_per_service: The maximum allowed percentage of - unhealthy partitions per service. Allowed values are Byte values from zero to 100 - - The percentage represents the maximum tolerated percentage of partitions that can be unhealthy - before the service is considered in error. - If the percentage is respected but there is at least one unhealthy partition, the health is - evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy partitions over the total - number of partitions in the service. - The computation rounds up to tolerate one failure on small numbers of partitions. Default - percentage is zero. +class ServiceTypeHealthPolicy(Model): + """Represents the health policy used to evaluate the health of services + belonging to a service type. + + :param max_percent_unhealthy_partitions_per_service: The maximum allowed + percentage of unhealthy partitions per service. Allowed values are Byte + values from zero to 100 + The percentage represents the maximum tolerated percentage of partitions + that can be unhealthy before the service is considered in error. + If the percentage is respected but there is at least one unhealthy + partition, the health is evaluated as Warning. 
+ The percentage is calculated by dividing the number of unhealthy + partitions over the total number of partitions in the service. + The computation rounds up to tolerate one failure on small numbers of + partitions. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_partitions_per_service: int - :param max_percent_unhealthy_replicas_per_partition: The maximum allowed percentage of - unhealthy replicas per partition. Allowed values are Byte values from zero to 100. - - The percentage represents the maximum tolerated percentage of replicas that can be unhealthy - before the partition is considered in error. - If the percentage is respected but there is at least one unhealthy replica, the health is - evaluated as Warning. - The percentage is calculated by dividing the number of unhealthy replicas over the total - number of replicas in the partition. - The computation rounds up to tolerate one failure on small numbers of replicas. Default - percentage is zero. + :param max_percent_unhealthy_replicas_per_partition: The maximum allowed + percentage of unhealthy replicas per partition. Allowed values are Byte + values from zero to 100. + The percentage represents the maximum tolerated percentage of replicas + that can be unhealthy before the partition is considered in error. + If the percentage is respected but there is at least one unhealthy + replica, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy replicas + over the total number of replicas in the partition. + The computation rounds up to tolerate one failure on small numbers of + replicas. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_replicas_per_partition: int - :param max_percent_unhealthy_services: The maximum allowed percentage of unhealthy services. - Allowed values are Byte values from zero to 100. 
- - The percentage represents the maximum tolerated percentage of services that can be unhealthy - before the application is considered in error. - If the percentage is respected but there is at least one unhealthy service, the health is - evaluated as Warning. - This is calculated by dividing the number of unhealthy services of the specific service type - over the total number of services of the specific service type. - The computation rounds up to tolerate one failure on small numbers of services. Default - percentage is zero. + :param max_percent_unhealthy_services: The maximum allowed percentage of + unhealthy services. Allowed values are Byte values from zero to 100. + The percentage represents the maximum tolerated percentage of services + that can be unhealthy before the application is considered in error. + If the percentage is respected but there is at least one unhealthy + service, the health is evaluated as Warning. + This is calculated by dividing the number of unhealthy services of the + specific service type over the total number of services of the specific + service type. + The computation rounds up to tolerate one failure on small numbers of + services. Default percentage is zero. Default value: 0 . 
:type max_percent_unhealthy_services: int """ @@ -24168,30 +20229,23 @@ class ServiceTypeHealthPolicy(msrest.serialization.Model): 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, } - def __init__( - self, - *, - max_percent_unhealthy_partitions_per_service: Optional[int] = 0, - max_percent_unhealthy_replicas_per_partition: Optional[int] = 0, - max_percent_unhealthy_services: Optional[int] = 0, - **kwargs - ): + def __init__(self, *, max_percent_unhealthy_partitions_per_service: int=0, max_percent_unhealthy_replicas_per_partition: int=0, max_percent_unhealthy_services: int=0, **kwargs) -> None: super(ServiceTypeHealthPolicy, self).__init__(**kwargs) self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition self.max_percent_unhealthy_services = max_percent_unhealthy_services -class ServiceTypeHealthPolicyMapItem(msrest.serialization.Model): +class ServiceTypeHealthPolicyMapItem(Model): """Defines an item in ServiceTypeHealthPolicyMap. All required parameters must be populated in order to send to Azure. - :param key: Required. The key of the service type health policy map item. This is the name of - the service type. + :param key: Required. The key of the service type health policy map item. + This is the name of the service type. :type key: str - :param value: Required. The value of the service type health policy map item. This is the - ServiceTypeHealthPolicy for this service type. + :param value: Required. The value of the service type health policy map + item. This is the ServiceTypeHealthPolicy for this service type. 
:type value: ~azure.servicefabric.models.ServiceTypeHealthPolicy """ @@ -24205,32 +20259,29 @@ class ServiceTypeHealthPolicyMapItem(msrest.serialization.Model): 'value': {'key': 'Value', 'type': 'ServiceTypeHealthPolicy'}, } - def __init__( - self, - *, - key: str, - value: "ServiceTypeHealthPolicy", - **kwargs - ): + def __init__(self, *, key: str, value, **kwargs) -> None: super(ServiceTypeHealthPolicyMapItem, self).__init__(**kwargs) self.key = key self.value = value -class ServiceTypeInfo(msrest.serialization.Model): - """Information about a service type that is defined in a service manifest of a provisioned application type. +class ServiceTypeInfo(Model): + """Information about a service type that is defined in a service manifest of a + provisioned application type. - :param service_type_description: Describes a service type defined in the service manifest of a - provisioned application type. The properties the ones defined in the service manifest. - :type service_type_description: ~azure.servicefabric.models.ServiceTypeDescription - :param service_manifest_name: The name of the service manifest in which this service type is - defined. + :param service_type_description: Describes a service type defined in the + service manifest of a provisioned application type. The properties the + ones defined in the service manifest. + :type service_type_description: + ~azure.servicefabric.models.ServiceTypeDescription + :param service_manifest_name: The name of the service manifest in which + this service type is defined. :type service_manifest_name: str - :param service_manifest_version: The version of the service manifest in which this service type - is defined. + :param service_manifest_version: The version of the service manifest in + which this service type is defined. :type service_manifest_version: str - :param is_service_group: Indicates whether the service is a service group. If it is, the - property value is true otherwise false. 
+ :param is_service_group: Indicates whether the service is a service group. + If it is, the property value is true otherwise false. :type is_service_group: bool """ @@ -24241,15 +20292,7 @@ class ServiceTypeInfo(msrest.serialization.Model): 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, } - def __init__( - self, - *, - service_type_description: Optional["ServiceTypeDescription"] = None, - service_manifest_name: Optional[str] = None, - service_manifest_version: Optional[str] = None, - is_service_group: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, service_type_description=None, service_manifest_name: str=None, service_manifest_version: str=None, is_service_group: bool=None, **kwargs) -> None: super(ServiceTypeInfo, self).__init__(**kwargs) self.service_type_description = service_type_description self.service_manifest_name = service_manifest_name @@ -24257,8 +20300,9 @@ def __init__( self.is_service_group = is_service_group -class ServiceTypeManifest(msrest.serialization.Model): - """Contains the manifest describing a service type registered as part of an application in a Service Fabric cluster. +class ServiceTypeManifest(Model): + """Contains the manifest describing a service type registered as part of an + application in a Service Fabric cluster. :param manifest: The XML manifest as a string. :type manifest: str @@ -24268,91 +20312,101 @@ class ServiceTypeManifest(msrest.serialization.Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__( - self, - *, - manifest: Optional[str] = None, - **kwargs - ): + def __init__(self, *, manifest: str=None, **kwargs) -> None: super(ServiceTypeManifest, self).__init__(**kwargs) self.manifest = manifest -class ServiceUpdateDescription(msrest.serialization.Model): - """A ServiceUpdateDescription contains all of the information necessary to update a service. 
+class ServiceUpdateDescription(Model): + """A ServiceUpdateDescription contains all of the information necessary to + update a service. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: StatefulServiceUpdateDescription, StatelessServiceUpdateDescription. - - All required parameters must be populated in order to send to Azure. - - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and - QuorumLossWaitDuration (4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. - * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property - (for Stateful services) or the InstanceCount property (for Stateless services) is set. The - value is 1. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 2. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 4. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 8. - * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. - * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. - * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is - 64. - * Correlation - Indicates the CorrelationScheme property is set. The value is 128. 
- * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. - * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. - * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 2048. - * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. - * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is - 8192. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 16384. - * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 32768. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 65536. - * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. - * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. - * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. + sub-classes are: StatefulServiceUpdateDescription, + StatelessServiceUpdateDescription + + All required parameters must be populated in order to send to Azure. + + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. 
+ - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 2048. + - MinInstanceCount - Indicates the MinInstanceCount property is set. The + value is 4096. + - MinInstancePercentage - Indicates the MinInstancePercentage property is + set. The value is 8192. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 16384. + - InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 32768. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 65536. + - ServiceDnsName - Indicates the ServiceDnsName property is set. The value + is 131072. + - TagsForPlacement - Indicates the TagsForPlacement property is set. The + value is 1048576. 
+ - TagsForRunning - Indicates the TagsForRunning property is set. The value + is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. 
- :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param service_dns_name: The DNS name of the service. :type service_dns_name: str :param tags_for_placement: Tags for placement of this service. :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription :param tags_for_running: Tags for running of this service. :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str """ _validation = { @@ -24360,7 +20414,6 @@ class ServiceUpdateDescription(msrest.serialization.Model): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -24371,29 +20424,15 @@ class ServiceUpdateDescription(msrest.serialization.Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } _subtype_map = { 'service_kind': {'Stateful': 'StatefulServiceUpdateDescription', 'Stateless': 'StatelessServiceUpdateDescription'} } - def __init__( - self, - *, - flags: Optional[str] = None, - placement_constraints: Optional[str] = None, - correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, - load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - default_move_cost: Optional[Union[str, "MoveCost"]] = None, - scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, - service_dns_name: 
Optional[str] = None, - tags_for_placement: Optional["NodeTagsDescription"] = None, - tags_for_running: Optional["NodeTagsDescription"] = None, - **kwargs - ): + def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, service_dns_name: str=None, tags_for_placement=None, tags_for_running=None, **kwargs) -> None: super(ServiceUpdateDescription, self).__init__(**kwargs) - self.service_kind = None # type: Optional[str] self.flags = flags self.placement_constraints = placement_constraints self.correlation_scheme = correlation_scheme @@ -24404,18 +20443,20 @@ def __init__( self.service_dns_name = service_dns_name self.tags_for_placement = tags_for_placement self.tags_for_running = tags_for_running + self.service_kind = None -class ServiceUpgradeProgress(msrest.serialization.Model): - """Information about how many replicas are completed or pending for a specific service during upgrade. +class ServiceUpgradeProgress(Model): + """Information about how many replicas are completed or pending for a specific + service during upgrade. :param service_name: Name of the Service resource. :type service_name: str - :param completed_replica_count: The number of replicas that completes the upgrade in the - service. + :param completed_replica_count: The number of replicas that completes the + upgrade in the service. :type completed_replica_count: str - :param pending_replica_count: The number of replicas that are waiting to be upgraded in the - service. + :param pending_replica_count: The number of replicas that are waiting to + be upgraded in the service. 
:type pending_replica_count: str """ @@ -24425,29 +20466,26 @@ class ServiceUpgradeProgress(msrest.serialization.Model): 'pending_replica_count': {'key': 'PendingReplicaCount', 'type': 'str'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - completed_replica_count: Optional[str] = None, - pending_replica_count: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, completed_replica_count: str=None, pending_replica_count: str=None, **kwargs) -> None: super(ServiceUpgradeProgress, self).__init__(**kwargs) self.service_name = service_name self.completed_replica_count = completed_replica_count self.pending_replica_count = pending_replica_count -class Setting(msrest.serialization.Model): - """Describes a setting for the container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\secrets". The path for Linux container is "/var/secrets". +class Setting(Model): + """Describes a setting for the container. The setting file path can be fetched + from environment variable "Fabric_SettingPath". The path for Windows + container is "C:\\secrets". The path for Linux container is "/var/secrets". - :param type: The type of the setting being given in value. Possible values include: - "ClearText", "KeyVaultReference", "SecretValueReference". Default value: "ClearText". + :param type: The type of the setting being given in value. Possible values + include: 'ClearText', 'KeyVaultReference', 'SecretValueReference'. Default + value: "ClearText" . :type type: str or ~azure.servicefabric.models.SettingType :param name: The name of the setting. :type name: str - :param value: The value of the setting, will be processed based on the type provided. + :param value: The value of the setting, will be processed based on the + type provided. 
:type value: str """ @@ -24457,14 +20495,7 @@ class Setting(msrest.serialization.Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__( - self, - *, - type: Optional[Union[str, "SettingType"]] = "ClearText", - name: Optional[str] = None, - value: Optional[str] = None, - **kwargs - ): + def __init__(self, *, type="ClearText", name: str=None, value: str=None, **kwargs) -> None: super(Setting, self).__init__(**kwargs) self.type = type self.name = name @@ -24472,19 +20503,20 @@ def __init__( class SingletonPartitionInformation(PartitionInformation): - """Information about a partition that is singleton. The services with singleton partitioning scheme are effectively non-partitioned. They only have one partition. + """Information about a partition that is singleton. The services with + singleton partitioning scheme are effectively non-partitioned. They only + have one partition. All required parameters must be populated in order to send to Azure. - :param service_partition_kind: Required. The kind of partitioning scheme used to partition the - service.Constant filled by server. Possible values include: "Invalid", "Singleton", - "Int64Range", "Named". - :type service_partition_kind: str or ~azure.servicefabric.models.ServicePartitionKind - :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a - randomly generated GUID when the service was created. The partition ID is unique and does not - change for the lifetime of the service. If the same service was deleted and recreated the IDs - of its partitions would be different. + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. :type id: str + :param service_partition_kind: Required. 
Constant filled by server. + :type service_partition_kind: str """ _validation = { @@ -24492,28 +20524,23 @@ class SingletonPartitionInformation(PartitionInformation): } _attribute_map = { - 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, id: str=None, **kwargs) -> None: super(SingletonPartitionInformation, self).__init__(id=id, **kwargs) - self.service_partition_kind = 'Singleton' # type: str + self.service_partition_kind = 'Singleton' class SingletonPartitionSchemeDescription(PartitionSchemeDescription): - """Describes the partition scheme of a singleton-partitioned, or non-partitioned service. + """Describes the partition scheme of a singleton-partitioned, or + non-partitioned service. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. + :type partition_scheme: str """ _validation = { @@ -24524,66 +20551,76 @@ class SingletonPartitionSchemeDescription(PartitionSchemeDescription): 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, } - def __init__( - self, - **kwargs - ): + def __init__(self, **kwargs) -> None: super(SingletonPartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'Singleton' # type: str + self.partition_scheme = 'Singleton' -class StartClusterUpgradeDescription(msrest.serialization.Model): +class StartClusterUpgradeDescription(Model): """Describes the parameters for starting a cluster upgrade. :param code_version: The cluster code version. 
:type code_version: str :param config_version: The cluster configuration version. :type config_version: str - :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling". Default value: "Rolling". + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind - :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. The - values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: - "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: - "UnmonitoredAuto". + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode - :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block - processing of an upgrade domain and prevent loss of availability when there are unexpected - issues. When this timeout expires, processing of the upgrade domain will proceed regardless of - availability loss issues. The timeout is reset at the start of each upgrade domain. Valid - values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. 
+ Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). :type upgrade_replica_set_check_timeout_in_seconds: long - :param force_restart: If true, then processes are forcefully restarted during upgrade even when - the code version has not changed (the upgrade only changes configuration or data). + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). :type force_restart: bool - :param sort_order: Defines the order in which an upgrade proceeds through the cluster. Possible - values include: "Invalid", "Default", "Numeric", "Lexicographical", "ReverseNumeric", - "ReverseLexicographical". Default value: "Default". + :param sort_order: Defines the order in which an upgrade proceeds through + the cluster. Possible values include: 'Invalid', 'Default', 'Numeric', + 'Lexicographical', 'ReverseNumeric', 'ReverseLexicographical'. Default + value: "Default" . :type sort_order: str or ~azure.servicefabric.models.UpgradeSortOrder - :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. - :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than - absolute health evaluation after completion of each upgrade domain. + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. 
+ :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of - the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health policy map used to - evaluate the health of an application or one of its children entities. - :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies - :param instance_close_delay_duration_in_seconds: Duration in seconds, to wait before a - stateless instance is closed, to allow the active requests to drain gracefully. This would be - effective when the instance is closing during the application/cluster - upgrade, only for those instances which have a non-zero delay duration configured in the - service description. See InstanceCloseDelayDurationSeconds property in $ref: + :param application_health_policy_map: Defines the application health + policy map used to evaluate the health of an application or one of its + children entities. + :type application_health_policy_map: + ~azure.servicefabric.models.ApplicationHealthPolicies + :param instance_close_delay_duration_in_seconds: Duration in seconds, to + wait before a stateless instance is closed, to allow the active requests + to drain gracefully. This would be effective when the instance is closing + during the application/cluster + upgrade, only for those instances which have a non-zero delay duration + configured in the service description. 
See + InstanceCloseDelayDurationSeconds property in $ref: "#/definitions/StatelessServiceDescription.yaml" for details. - Note, the default value of InstanceCloseDelayDurationInSeconds is 4294967295, which indicates - that the behavior will entirely depend on the delay configured in the stateless service - description. + Note, the default value of InstanceCloseDelayDurationInSeconds is + 4294967295, which indicates that the behavior will entirely depend on the + delay configured in the stateless service description. :type instance_close_delay_duration_in_seconds: long """ @@ -24603,24 +20640,7 @@ class StartClusterUpgradeDescription(msrest.serialization.Model): 'instance_close_delay_duration_in_seconds': {'key': 'InstanceCloseDelayDurationInSeconds', 'type': 'long'}, } - def __init__( - self, - *, - code_version: Optional[str] = None, - config_version: Optional[str] = None, - upgrade_kind: Optional[Union[str, "UpgradeKind"]] = "Rolling", - rolling_upgrade_mode: Optional[Union[str, "UpgradeMode"]] = "UnmonitoredAuto", - upgrade_replica_set_check_timeout_in_seconds: Optional[int] = 42949672925, - force_restart: Optional[bool] = False, - sort_order: Optional[Union[str, "UpgradeSortOrder"]] = "Default", - monitoring_policy: Optional["MonitoringPolicyDescription"] = None, - cluster_health_policy: Optional["ClusterHealthPolicy"] = None, - enable_delta_health_evaluation: Optional[bool] = None, - cluster_upgrade_health_policy: Optional["ClusterUpgradeHealthPolicyObject"] = None, - application_health_policy_map: Optional["ApplicationHealthPolicies"] = None, - instance_close_delay_duration_in_seconds: Optional[int] = 4294967295, - **kwargs - ): + def __init__(self, *, code_version: str=None, config_version: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, sort_order="Default", monitoring_policy=None, cluster_health_policy=None, enable_delta_health_evaluation: bool=None, 
cluster_upgrade_health_policy=None, application_health_policy_map=None, instance_close_delay_duration_in_seconds: int=None, **kwargs) -> None: super(StartClusterUpgradeDescription, self).__init__(**kwargs) self.code_version = code_version self.config_version = config_version @@ -24642,37 +20662,31 @@ class StartedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param chaos_parameters: Defines all the parameters to configure a Chaos run. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param chaos_parameters: Defines all the parameters to configure a Chaos + run. 
:type chaos_parameters: ~azure.servicefabric.models.ChaosParameters """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - chaos_parameters: Optional["ChaosParameters"] = None, - **kwargs - ): + def __init__(self, *, time_stamp_utc, chaos_parameters=None, **kwargs) -> None: super(StartedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) - self.kind = 'Started' # type: str self.chaos_parameters = chaos_parameters + self.kind = 'Started' class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -24680,48 +20694,31 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -24737,16 +20734,17 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -24761,11 +20759,11 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -24779,28 +20777,8 @@ class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, 
- partition_id: str, - replica_id: int, - replica_instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, replica_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(StatefulReplicaHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) - self.kind = 'StatefulReplicaHealthReportExpired' # type: str self.replica_instance_id = replica_instance_id self.source_id = source_id self.property = property @@ -24810,6 +20788,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatefulReplicaHealthReportExpired' class StatefulReplicaNewHealthReportEvent(ReplicaEvent): @@ -24817,48 +20796,31 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. 
Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. 
If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param replica_instance_id: Required. Id of Replica instance. :type replica_instance_id: long @@ -24874,16 +20836,17 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. - :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'replica_instance_id': {'required': True}, @@ -24898,11 +20861,11 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, @@ -24916,28 +20879,8 @@ class StatefulReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - 
partition_id: str, - replica_id: int, - replica_instance_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, replica_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(StatefulReplicaNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) - self.kind = 'StatefulReplicaNewHealthReport' # type: str self.replica_instance_id = replica_instance_id self.source_id = source_id self.property = property @@ -24947,6 +20890,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatefulReplicaNewHealthReport' class StatefulServiceDescription(ServiceDescription): @@ -24954,105 +20898,121 @@ class StatefulServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. 
The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. Initialization data - is passed to service instances or replicas when they are created. + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an object. - :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. 
- :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. 
- :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param tags_required_to_place: Tags for placement of this service. - :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_place: + ~azure.servicefabric.models.NodeTagsDescription :param tags_required_to_run: Tags for running of this service. - :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription - :param target_replica_set_size: Required. The target replica set size as a number. + :type tags_required_to_run: + ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param target_replica_set_size: Required. The target replica set size as a + number. :type target_replica_set_size: int - :param min_replica_set_size: Required. The minimum replica set size as a number. + :param min_replica_set_size: Required. The minimum replica set size as a + number. :type min_replica_set_size: int - :param has_persisted_state: Required. A flag indicating whether this is a persistent service - which stores states on the local disk. If it is then the value of this property is true, if not - it is false. + :param has_persisted_state: Required. A flag indicating whether this is a + persistent service which stores states on the local disk. If it is then + the value of this property is true, if not it is false. :type has_persisted_state: bool - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for QuorumLossWaitDuration (2) and - StandByReplicaKeepDuration(4) are set. 
- - - * None - Does not indicate any other properties are set. The value is zero. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 1. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 2. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 4. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 8. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 16. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + QuorumLossWaitDuration (2) and StandByReplicaKeepDuration(4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 1. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 2. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 4. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 8. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 16. :type flags: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica - goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, + between when a replica goes down and when a new replica is created. 
:type replica_restart_wait_duration_seconds: long - :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a - partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in + seconds, for which a partition is allowed to be in a state of quorum loss. :type quorum_loss_wait_duration_seconds: long - :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas - should be maintained before being removed. + :param stand_by_replica_keep_duration_seconds: The definition on how long + StandBy replicas should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: long - :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild - before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which + replicas can stay InBuild before reporting that build is stuck. :type service_placement_time_limit_seconds: long - :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if - the target replica has not finished build. If desired behavior is to drop it as soon as - possible the value of this property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source + Secondary replica even if the target replica has not finished build. If + desired behavior is to drop it as soon as possible the value of this + property is true, if not it is false. :type drop_source_replica_on_move: bool - :param replica_lifecycle_description: Defines how replicas of this service will behave during - their lifecycle. - :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription + :param replica_lifecycle_description: Defines how replicas of this service + will behave during their lifecycle. 
+ :type replica_lifecycle_description: + ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { - 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, + 'service_kind': {'required': True}, 'target_replica_set_size': {'required': True, 'minimum': 1}, 'min_replica_set_size': {'required': True, 'minimum': 1}, 'has_persisted_state': {'required': True}, @@ -25063,7 +21023,6 @@ class StatefulServiceDescription(ServiceDescription): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -25080,6 +21039,7 @@ class StatefulServiceDescription(ServiceDescription): 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, @@ -25092,39 +21052,8 @@ class StatefulServiceDescription(ServiceDescription): 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, } - def __init__( - self, - *, - service_name: str, - service_type_name: str, - partition_description: "PartitionSchemeDescription", - target_replica_set_size: int, - min_replica_set_size: int, - has_persisted_state: bool, - application_name: Optional[str] = None, - initialization_data: Optional[List[int]] = None, - placement_constraints: Optional[str] = None, - correlation_scheme: 
Optional[List["ServiceCorrelationDescription"]] = None, - service_load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - default_move_cost: Optional[Union[str, "MoveCost"]] = None, - is_default_move_cost_specified: Optional[bool] = None, - service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, - service_dns_name: Optional[str] = None, - scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, - tags_required_to_place: Optional["NodeTagsDescription"] = None, - tags_required_to_run: Optional["NodeTagsDescription"] = None, - flags: Optional[int] = None, - replica_restart_wait_duration_seconds: Optional[int] = None, - quorum_loss_wait_duration_seconds: Optional[int] = None, - stand_by_replica_keep_duration_seconds: Optional[int] = None, - service_placement_time_limit_seconds: Optional[int] = None, - drop_source_replica_on_move: Optional[bool] = None, - replica_lifecycle_description: Optional["ReplicaLifecycleDescription"] = None, - **kwargs - ): + def __init__(self, *, service_name: str, service_type_name: str, partition_description, target_replica_set_size: int, min_replica_set_size: int, has_persisted_state: bool, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, tags_required_to_place=None, tags_required_to_run=None, flags: int=None, replica_restart_wait_duration_seconds: int=None, quorum_loss_wait_duration_seconds: int=None, stand_by_replica_keep_duration_seconds: int=None, service_placement_time_limit_seconds: int=None, drop_source_replica_on_move: bool=None, replica_lifecycle_description=None, **kwargs) -> None: 
super(StatefulServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, tags_required_to_place=tags_required_to_place, tags_required_to_run=tags_required_to_run, **kwargs) - self.service_kind = 'Stateful' # type: str self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.has_persisted_state = has_persisted_state @@ -25135,6 +21064,7 @@ def __init__( self.service_placement_time_limit_seconds = service_placement_time_limit_seconds self.drop_source_replica_on_move = drop_source_replica_on_move self.replica_lifecycle_description = replica_lifecycle_description + self.service_kind = 'Stateful' class StatefulServiceInfo(ServiceInfo): @@ -25142,31 +21072,33 @@ class StatefulServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. :type id: str - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service manifest. + :param type_name: Name of the service type as specified in the service + manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values include: "Unknown", - "Active", "Upgrading", "Deleting", "Creating", "Failed". + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param has_persisted_state: Whether the service has persisted state. 
:type has_persisted_state: bool """ @@ -25177,32 +21109,20 @@ class StatefulServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - type_name: Optional[str] = None, - manifest_version: Optional[str] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - service_status: Optional[Union[str, "ServiceStatus"]] = None, - is_service_group: Optional[bool] = None, - has_persisted_state: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, has_persisted_state: bool=None, **kwargs) -> None: super(StatefulServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group, **kwargs) - self.service_kind = 'Stateful' # type: str self.has_persisted_state = has_persisted_state + self.service_kind = 'Stateful' class StatefulServicePartitionInfo(ServicePartitionInfo): @@ -25210,31 +21130,35 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. 
Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service partition. Possible values - include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". - :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, partitioning scheme and - keys supported by it. - :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: long :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: long - :param last_quorum_loss_duration: The duration for which this partition was in quorum loss. 
If - the partition is currently in quorum loss, it returns the duration since it has been in that - state. This field is using ISO8601 format for specifying the duration. - :type last_quorum_loss_duration: ~datetime.timedelta - :param primary_epoch: An Epoch is a configuration number for the partition as a whole. When the - configuration of the replica set changes, for example when the Primary replica changes, the - operations that are replicated from the new Primary replica are said to be a new Epoch from the - ones which were sent by the old Primary replica. + :param last_quorum_loss_duration: The duration for which this partition + was in quorum loss. If the partition is currently in quorum loss, it + returns the duration since it has been in that state. This field is using + ISO8601 format for specifying the duration. + :type last_quorum_loss_duration: timedelta + :param primary_epoch: An Epoch is a configuration number for the partition + as a whole. When the configuration of the replica set changes, for example + when the Primary replica changes, the operations that are replicated from + the new Primary replica are said to be a new Epoch from the ones which + were sent by the old Primary replica. 
:type primary_epoch: ~azure.servicefabric.models.Epoch """ @@ -25243,67 +21167,60 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'long'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'long'}, 'last_quorum_loss_duration': {'key': 'LastQuorumLossDuration', 'type': 'duration'}, 'primary_epoch': {'key': 'PrimaryEpoch', 'type': 'Epoch'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - partition_status: Optional[Union[str, "ServicePartitionStatus"]] = None, - partition_information: Optional["PartitionInformation"] = None, - target_replica_set_size: Optional[int] = None, - min_replica_set_size: Optional[int] = None, - last_quorum_loss_duration: Optional[datetime.timedelta] = None, - primary_epoch: Optional["Epoch"] = None, - **kwargs - ): + def __init__(self, *, health_state=None, partition_status=None, partition_information=None, target_replica_set_size: int=None, min_replica_set_size: int=None, last_quorum_loss_duration=None, primary_epoch=None, **kwargs) -> None: super(StatefulServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information, **kwargs) - self.service_kind = 'Stateful' # type: str self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.last_quorum_loss_duration = last_quorum_loss_duration self.primary_epoch = primary_epoch + self.service_kind = 'Stateful' class StatefulServiceReplicaHealth(ReplicaHealth): """Represents the health of the stateful 
service replica. -Contains the replica aggregated health state, the health events and the unhealthy evaluations. + Contains the replica aggregated health state, the health events and the + unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. - The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. 
:type replica_id: str """ @@ -25316,46 +21233,41 @@ class StatefulServiceReplicaHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - partition_id: Optional[str] = None, - replica_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: super(StatefulServiceReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, partition_id=partition_id, **kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = replica_id + self.service_kind = 'Stateful' class StatefulServiceReplicaHealthState(ReplicaHealthState): - """Represents the health state of the stateful service replica, which contains the replica ID and the aggregated health state. + """Represents the health state of the stateful service replica, which contains + the replica ID and the aggregated health state. All required parameters must be populated in order to send to Azure. 
- :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param partition_id: The ID of the partition to which this replica belongs. + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. :type partition_id: str - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. 
+ Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str """ @@ -25365,53 +21277,52 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - partition_id: Optional[str] = None, - replica_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: super(StatefulServiceReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, partition_id=partition_id, **kwargs) - self.service_kind = 'Stateful' # type: str self.replica_id = replica_id + self.service_kind = 'Stateful' class StatefulServiceReplicaInfo(ReplicaInfo): - """Represents a stateful service replica. This includes information about the identity, role, status, health, node name, uptime, and other details about the replica. + """Represents a stateful service replica. This includes information about the + identity, role, status, health, node name, uptime, and other details about + the replica. All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". + :param replica_status: The status of a replica of a service. 
Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. :type last_in_build_duration_in_seconds: str - :param replica_role: The role of a replica of a stateful service. Possible values include: - "Unknown", "None", "Primary", "IdleSecondary", "ActiveSecondary". + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_role: The role of a replica of a stateful service. Possible + values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', + 'ActiveSecondary' :type replica_role: str or ~azure.servicefabric.models.ReplicaRole - :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to - uniquely identify a replica of a partition. It is unique within a partition and does not change - for the lifetime of the replica. If a replica gets dropped and another replica gets created on - the same node for the same partition, it will get a different value for the id. Sometimes the - id of a stateless service instance is also referred as a replica id. 
+ :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. :type replica_id: str """ @@ -25420,61 +21331,55 @@ class StatefulServiceReplicaInfo(ReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - *, - replica_status: Optional[Union[str, "ReplicaStatus"]] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - address: Optional[str] = None, - last_in_build_duration_in_seconds: Optional[str] = None, - replica_role: Optional[Union[str, "ReplicaRole"]] = None, - replica_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, replica_role=None, replica_id: str=None, **kwargs) -> None: super(StatefulServiceReplicaInfo, self).__init__(replica_status=replica_status, health_state=health_state, node_name=node_name, address=address, last_in_build_duration_in_seconds=last_in_build_duration_in_seconds, **kwargs) - self.service_kind = 'Stateful' # type: str self.replica_role = 
replica_role self.replica_id = replica_id + self.service_kind = 'Stateful' class StatefulServiceTypeDescription(ServiceTypeDescription): - """Describes a stateful service type defined in the service manifest of a provisioned application type. + """Describes a stateful service type defined in the service manifest of a + provisioned application type. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. - Possible values include: "Invalid", "Stateless", "Stateful". - :type kind: str or ~azure.servicefabric.models.ServiceKind - :param is_stateful: Indicates whether the service type is a stateful service type or a - stateless service type. This property is true if the service type is a stateful service type, - false otherwise. + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when instantiating this - service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy descriptions. 
+ :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. - :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param has_persisted_state: A flag indicating whether this is a persistent service which stores - states on the local disk. If it is then the value of this property is true, if not it is false. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. + :type kind: str + :param has_persisted_state: A flag indicating whether this is a persistent + service which stores states on the local disk. If it is then the value of + this property is true, if not it is false. :type has_persisted_state: bool """ @@ -25483,31 +21388,20 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__( - self, - *, - is_stateful: Optional[bool] = None, - service_type_name: Optional[str] = None, - placement_constraints: Optional[str] = None, - load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: 
Optional[List["ServicePlacementPolicyDescription"]] = None, - extensions: Optional[List["ServiceTypeExtensionDescription"]] = None, - has_persisted_state: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, has_persisted_state: bool=None, **kwargs) -> None: super(StatefulServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, load_metrics=load_metrics, service_placement_policies=service_placement_policies, extensions=extensions, **kwargs) - self.kind = 'Stateful' # type: str self.has_persisted_state = has_persisted_state + self.kind = 'Stateful' class StatefulServiceUpdateDescription(ServiceUpdateDescription): @@ -25515,96 +21409,111 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and - QuorumLossWaitDuration (4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. - * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property - (for Stateful services) or the InstanceCount property (for Stateless services) is set. The - value is 1. 
- * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 2. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 4. - * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 8. - * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. - * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. - * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is - 64. - * Correlation - Indicates the CorrelationScheme property is set. The value is 128. - * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. - * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. - * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 2048. - * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. - * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is - 8192. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 16384. - * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 32768. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 65536. - * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. - * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. - * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. + :param flags: Flags indicating whether other properties are set. 
Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 2048. + - MinInstanceCount - Indicates the MinInstanceCount property is set. The + value is 4096. + - MinInstancePercentage - Indicates the MinInstancePercentage property is + set. The value is 8192. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. 
The value is 16384. + - InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 32768. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. The value is 65536. + - ServiceDnsName - Indicates the ServiceDnsName property is set. The value + is 131072. + - TagsForPlacement - Indicates the TagsForPlacement property is set. The + value is 1048576. + - TagsForRunning - Indicates the TagsForRunning property is set. The value + is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". 
+ :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param service_dns_name: The DNS name of the service. :type service_dns_name: str :param tags_for_placement: Tags for placement of this service. :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription :param tags_for_running: Tags for running of this service. :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: int :param min_replica_set_size: The minimum replica set size as a number. :type min_replica_set_size: int - :param replica_restart_wait_duration_seconds: The duration, in seconds, between when a replica - goes down and when a new replica is created. + :param replica_restart_wait_duration_seconds: The duration, in seconds, + between when a replica goes down and when a new replica is created. :type replica_restart_wait_duration_seconds: str - :param quorum_loss_wait_duration_seconds: The maximum duration, in seconds, for which a - partition is allowed to be in a state of quorum loss. + :param quorum_loss_wait_duration_seconds: The maximum duration, in + seconds, for which a partition is allowed to be in a state of quorum loss. :type quorum_loss_wait_duration_seconds: str - :param stand_by_replica_keep_duration_seconds: The definition on how long StandBy replicas - should be maintained before being removed. 
+ :param stand_by_replica_keep_duration_seconds: The definition on how long + StandBy replicas should be maintained before being removed. :type stand_by_replica_keep_duration_seconds: str - :param service_placement_time_limit_seconds: The duration for which replicas can stay InBuild - before reporting that build is stuck. + :param service_placement_time_limit_seconds: The duration for which + replicas can stay InBuild before reporting that build is stuck. :type service_placement_time_limit_seconds: str - :param drop_source_replica_on_move: Indicates whether to drop source Secondary replica even if - the target replica has not finished build. If desired behavior is to drop it as soon as - possible the value of this property is true, if not it is false. + :param drop_source_replica_on_move: Indicates whether to drop source + Secondary replica even if the target replica has not finished build. If + desired behavior is to drop it as soon as possible the value of this + property is true, if not it is false. :type drop_source_replica_on_move: bool - :param replica_lifecycle_description: Defines how replicas of this service will behave during - their lifecycle. - :type replica_lifecycle_description: ~azure.servicefabric.models.ReplicaLifecycleDescription + :param replica_lifecycle_description: Defines how replicas of this service + will behave during their lifecycle. 
+ :type replica_lifecycle_description: + ~azure.servicefabric.models.ReplicaLifecycleDescription """ _validation = { @@ -25614,7 +21523,6 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -25625,6 +21533,7 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, 'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'str'}, @@ -25635,31 +21544,8 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'replica_lifecycle_description': {'key': 'ReplicaLifecycleDescription', 'type': 'ReplicaLifecycleDescription'}, } - def __init__( - self, - *, - flags: Optional[str] = None, - placement_constraints: Optional[str] = None, - correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, - load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - default_move_cost: Optional[Union[str, "MoveCost"]] = None, - scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, - service_dns_name: Optional[str] = None, - tags_for_placement: Optional["NodeTagsDescription"] = None, - tags_for_running: Optional["NodeTagsDescription"] = None, - target_replica_set_size: Optional[int] 
= None, - min_replica_set_size: Optional[int] = None, - replica_restart_wait_duration_seconds: Optional[str] = None, - quorum_loss_wait_duration_seconds: Optional[str] = None, - stand_by_replica_keep_duration_seconds: Optional[str] = None, - service_placement_time_limit_seconds: Optional[str] = None, - drop_source_replica_on_move: Optional[bool] = None, - replica_lifecycle_description: Optional["ReplicaLifecycleDescription"] = None, - **kwargs - ): + def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, service_dns_name: str=None, tags_for_placement=None, tags_for_running=None, target_replica_set_size: int=None, min_replica_set_size: int=None, replica_restart_wait_duration_seconds: str=None, quorum_loss_wait_duration_seconds: str=None, stand_by_replica_keep_duration_seconds: str=None, service_placement_time_limit_seconds: str=None, drop_source_replica_on_move: bool=None, replica_lifecycle_description=None, **kwargs) -> None: super(StatefulServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, service_dns_name=service_dns_name, tags_for_placement=tags_for_placement, tags_for_running=tags_for_running, **kwargs) - self.service_kind = 'Stateful' # type: str self.target_replica_set_size = target_replica_set_size self.min_replica_set_size = min_replica_set_size self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds @@ -25668,6 +21554,7 @@ def __init__( self.service_placement_time_limit_seconds = service_placement_time_limit_seconds self.drop_source_replica_on_move = drop_source_replica_on_move self.replica_lifecycle_description = replica_lifecycle_description + 
self.service_kind = 'Stateful' class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): @@ -25675,48 +21562,31 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - "StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - 
"ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. :type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. 
- Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -25730,16 +21600,17 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -25753,11 +21624,11 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -25770,27 +21641,8 @@ class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - replica_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(StatelessReplicaHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, 
has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) - self.kind = 'StatelessReplicaHealthReportExpired' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -25799,6 +21651,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatelessReplicaHealthReportExpired' class StatelessReplicaNewHealthReportEvent(ReplicaEvent): @@ -25806,48 +21659,31 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of FabricEvent.Constant filled by server. Possible values - include: "ClusterEvent", "ContainerInstanceEvent", "NodeEvent", "ApplicationEvent", - "ServiceEvent", "PartitionEvent", "ReplicaEvent", "PartitionAnalysisEvent", - "ApplicationCreated", "ApplicationDeleted", "ApplicationNewHealthReport", - "ApplicationHealthReportExpired", "ApplicationUpgradeCompleted", - "ApplicationUpgradeDomainCompleted", "ApplicationUpgradeRollbackCompleted", - "ApplicationUpgradeRollbackStarted", "ApplicationUpgradeStarted", - "DeployedApplicationNewHealthReport", "DeployedApplicationHealthReportExpired", - "ApplicationProcessExited", "ApplicationContainerInstanceExited", "NodeAborted", - "NodeAddedToCluster", "NodeClosed", "NodeDeactivateCompleted", "NodeDeactivateStarted", - "NodeDown", "NodeNewHealthReport", "NodeHealthReportExpired", "NodeOpenSucceeded", - "NodeOpenFailed", "NodeRemovedFromCluster", "NodeUp", "PartitionNewHealthReport", - "PartitionHealthReportExpired", "PartitionReconfigured", "PartitionPrimaryMoveAnalysis", - "ServiceCreated", "ServiceDeleted", "ServiceNewHealthReport", "ServiceHealthReportExpired", - "DeployedServicePackageNewHealthReport", "DeployedServicePackageHealthReportExpired", - "StatefulReplicaNewHealthReport", "StatefulReplicaHealthReportExpired", - 
"StatelessReplicaNewHealthReport", "StatelessReplicaHealthReportExpired", - "ClusterNewHealthReport", "ClusterHealthReportExpired", "ClusterUpgradeCompleted", - "ClusterUpgradeDomainCompleted", "ClusterUpgradeRollbackCompleted", - "ClusterUpgradeRollbackStarted", "ClusterUpgradeStarted", "ChaosStopped", "ChaosStarted", - "ChaosCodePackageRestartScheduled", "ChaosReplicaRemovalScheduled", - "ChaosPartitionSecondaryMoveScheduled", "ChaosPartitionPrimaryMoveScheduled", - "ChaosReplicaRestartScheduled", "ChaosNodeRestartScheduled". - :type kind: str or ~azure.servicefabric.models.FabricEventKind - :param event_instance_id: Required. The identifier for the FabricEvent instance. + :param event_instance_id: Required. The identifier for the FabricEvent + instance. :type event_instance_id: str :param category: The category of event. :type category: str :param time_stamp: Required. The time event was logged. - :type time_stamp: ~datetime.datetime - :param has_correlated_events: Shows there is existing related events available. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. :type has_correlated_events: bool - :param partition_id: Required. An internal ID used by Service Fabric to uniquely identify a - partition. This is a randomly generated GUID when the service was created. The partition ID is - unique and does not change for the lifetime of the service. If the same service was deleted and - recreated the IDs of its partitions would be different. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
:type partition_id: str - :param replica_id: Required. Id of a stateful service replica. ReplicaId is used by Service - Fabric to uniquely identify a replica of a partition. It is unique within a partition and does - not change for the lifetime of the replica. If a replica gets dropped and another replica gets - created on the same node for the same partition, it will get a different value for the id. - Sometimes the id of a stateless service instance is also referred as a replica id. + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. :type replica_id: long :param source_id: Required. Id of report source. :type source_id: str @@ -25861,16 +21697,17 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): :type sequence_number: long :param description: Required. Description of report. :type description: str - :param remove_when_expired: Required. Indicates the removal when it expires. + :param remove_when_expired: Required. Indicates the removal when it + expires. :type remove_when_expired: bool :param source_utc_timestamp: Required. Source time. 
- :type source_utc_timestamp: ~datetime.datetime + :type source_utc_timestamp: datetime """ _validation = { - 'kind': {'required': True}, 'event_instance_id': {'required': True}, 'time_stamp': {'required': True}, + 'kind': {'required': True}, 'partition_id': {'required': True}, 'replica_id': {'required': True}, 'source_id': {'required': True}, @@ -25884,11 +21721,11 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, 'category': {'key': 'Category', 'type': 'str'}, 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, 'source_id': {'key': 'SourceId', 'type': 'str'}, @@ -25901,27 +21738,8 @@ class StatelessReplicaNewHealthReportEvent(ReplicaEvent): 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, } - def __init__( - self, - *, - event_instance_id: str, - time_stamp: datetime.datetime, - partition_id: str, - replica_id: int, - source_id: str, - property: str, - health_state: str, - time_to_live_ms: int, - sequence_number: int, - description: str, - remove_when_expired: bool, - source_utc_timestamp: datetime.datetime, - category: Optional[str] = None, - has_correlated_events: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, category: str=None, has_correlated_events: bool=None, **kwargs) -> None: super(StatelessReplicaNewHealthReportEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, 
has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) - self.kind = 'StatelessReplicaNewHealthReport' # type: str self.source_id = source_id self.property = property self.health_state = health_state @@ -25930,6 +21748,7 @@ def __init__( self.description = description self.remove_when_expired = remove_when_expired self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatelessReplicaNewHealthReport' class StatelessServiceDescription(ServiceDescription): @@ -25937,128 +21756,143 @@ class StatelessServiceDescription(ServiceDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param application_name: The name of the application, including the 'fabric:' URI scheme. + :param application_name: The name of the application, including the + 'fabric:' URI scheme. :type application_name: str - :param service_name: Required. The full name of the service with 'fabric:' URI scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Required. Name of the service type as specified in the service - manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str - :param initialization_data: The initialization data as an array of bytes. Initialization data - is passed to service instances or replicas when they are created. + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. :type initialization_data: list[int] - :param partition_description: Required. The partition description as an object. 
- :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics. - :type service_load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param is_default_move_cost_specified: Indicates if the DefaultMoveCost property is specified. 
+ :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. :type is_default_move_cost_specified: bool - :param service_package_activation_mode: The activation mode of service package to be used for a - service. Possible values include: "SharedProcess", "ExclusiveProcess". + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' :type service_package_activation_mode: str or ~azure.servicefabric.models.ServicePackageActivationMode - :param service_dns_name: The DNS name of the service. It requires the DNS system service to be - enabled in Service Fabric cluster. + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. :type service_dns_name: str :param scaling_policies: Scaling policies for this service. - :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param tags_required_to_place: Tags for placement of this service. - :type tags_required_to_place: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_place: + ~azure.servicefabric.models.NodeTagsDescription :param tags_required_to_run: Tags for running of this service. - :type tags_required_to_run: ~azure.servicefabric.models.NodeTagsDescription + :type tags_required_to_run: + ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param instance_count: Required. The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up - to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. 
- The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted - into the number of nodes on which the instances are allowed to be placed according to the - placement constraints on the service. + :param min_instance_count: MinInstanceCount is the minimum number of + instances that must be up to meet the EnsureAvailability safety check + during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation + -1 is first converted into the number of nodes on which the instances are + allowed to be placed according to the placement constraints on the + service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum percentage of - InstanceCount that must be up to meet the EnsureAvailability safety check during operations - like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first - converted into the number of nodes on which the instances are allowed to be placed according to - the placement constraints on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum + percentage of InstanceCount that must be up to meet the EnsureAvailability + safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). 
+ Note, if InstanceCount is set to -1, during MinInstancePercentage + computation, -1 is first converted into the number of nodes on which the + instances are allowed to be placed according to the placement constraints + on the service. :type min_instance_percentage: int - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 1 then the flags for InstanceCloseDelayDuration is set. - - - * None - Does not indicate any other properties are set. The value is zero. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 1. - * InstanceRestartWaitDuration - Indicates the InstanceRestartWaitDurationSeconds property is - set. The value is 2. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 1 then the flags for + InstanceCloseDelayDuration is set. + - None - Does not indicate any other properties are set. The value is + zero. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 1. + - InstanceRestartWaitDuration - Indicates the + InstanceRestartWaitDurationSeconds property is set. The value is 2. :type flags: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless - instance is closed, to allow the active requests to drain gracefully. This would be effective - when the instance is closing during the application/cluster upgrade and disabling node. 
- The endpoint exposed on this instance is removed prior to starting the delay, which prevents - new connections to this instance. + :param instance_close_delay_duration_seconds: Duration in seconds, to wait + before a stateless instance is closed, to allow the active requests to + drain gracefully. This would be effective when the instance is closing + during the application/cluster upgrade and disabling node. + The endpoint exposed on this instance is removed prior to starting the + delay, which prevents new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - .. code-block:: - - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future requests. - - Note, the default value of InstanceCloseDelayDuration is 0, which indicates that there won't - be any delay or removal of the endpoint prior to closing the instance. + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future + requests. + Note, the default value of InstanceCloseDelayDuration is 0, which + indicates that there won't be any delay or removal of the endpoint prior + to closing the instance. :type instance_close_delay_duration_seconds: long - :param instance_lifecycle_description: Defines how instances of this service will behave during - their lifecycle. - :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription - :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer - starts. 
When it expires Service Fabric will create a new instance on any node in the cluster. - This configuration is to reduce unnecessary creation of a new instance in situations where the - instance going down is likely to recover in a short time. For example, during an upgrade. - The default value is 0, which indicates that when stateless instance goes down, Service Fabric - will immediately start building its replacement. + :param instance_lifecycle_description: Defines how instances of this + service will behave during their lifecycle. + :type instance_lifecycle_description: + ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance + goes down, this timer starts. When it expires Service Fabric will create a + new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in + situations where the instance going down is likely to recover in a short + time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes + down, Service Fabric will immediately start building its replacement. 
:type instance_restart_wait_duration_seconds: long """ _validation = { - 'service_kind': {'required': True}, 'service_name': {'required': True}, 'service_type_name': {'required': True}, 'partition_description': {'required': True}, + 'service_kind': {'required': True}, 'instance_count': {'required': True, 'minimum': -1}, - 'min_instance_count': {'minimum': 1}, - 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, 'instance_close_delay_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, 'instance_restart_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_name': {'key': 'ServiceName', 'type': 'str'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, @@ -26075,6 +21909,7 @@ class StatelessServiceDescription(ServiceDescription): 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'tags_required_to_place': {'key': 'TagsRequiredToPlace', 'type': 'NodeTagsDescription'}, 'tags_required_to_run': {'key': 'TagsRequiredToRun', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, @@ -26084,36 +21919,8 @@ class StatelessServiceDescription(ServiceDescription): 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'long'}, } - def __init__( - self, - *, - service_name: str, - service_type_name: str, - partition_description: "PartitionSchemeDescription", - instance_count: int, - application_name: Optional[str] = None, - initialization_data: Optional[List[int]] = None, - placement_constraints: Optional[str] = None, - correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, 
- service_load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - default_move_cost: Optional[Union[str, "MoveCost"]] = None, - is_default_move_cost_specified: Optional[bool] = None, - service_package_activation_mode: Optional[Union[str, "ServicePackageActivationMode"]] = None, - service_dns_name: Optional[str] = None, - scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, - tags_required_to_place: Optional["NodeTagsDescription"] = None, - tags_required_to_run: Optional["NodeTagsDescription"] = None, - min_instance_count: Optional[int] = 1, - min_instance_percentage: Optional[int] = 0, - flags: Optional[int] = None, - instance_close_delay_duration_seconds: Optional[int] = None, - instance_lifecycle_description: Optional["InstanceLifecycleDescription"] = None, - instance_restart_wait_duration_seconds: Optional[int] = None, - **kwargs - ): + def __init__(self, *, service_name: str, service_type_name: str, partition_description, instance_count: int, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, tags_required_to_place=None, tags_required_to_run=None, min_instance_count: int=None, min_instance_percentage: int=None, flags: int=None, instance_close_delay_duration_seconds: int=None, instance_lifecycle_description=None, instance_restart_wait_duration_seconds: int=None, **kwargs) -> None: super(StatelessServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, 
correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, tags_required_to_place=tags_required_to_place, tags_required_to_run=tags_required_to_run, **kwargs) - self.service_kind = 'Stateless' # type: str self.instance_count = instance_count self.min_instance_count = min_instance_count self.min_instance_percentage = min_instance_percentage @@ -26121,6 +21928,7 @@ def __init__( self.instance_close_delay_duration_seconds = instance_close_delay_duration_seconds self.instance_lifecycle_description = instance_lifecycle_description self.instance_restart_wait_duration_seconds = instance_restart_wait_duration_seconds + self.service_kind = 'Stateless' class StatelessServiceInfo(ServiceInfo): @@ -26128,31 +21936,33 @@ class StatelessServiceInfo(ServiceInfo): All required parameters must be populated in order to send to Azure. - :param id: The identity of the service. This ID is an encoded representation of the service - name. This is used in the REST APIs to identify the service resource. - Starting in version 6.0, hierarchical names are delimited with the "~" character. For example, - if the service name is "fabric:/myapp/app1/svc1", - the service identity would be "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous - versions. + :param id: The identity of the service. This ID is an encoded + representation of the service name. This is used in the REST APIs to + identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. 
:type id: str - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param name: The full name of the service with 'fabric:' URI scheme. :type name: str - :param type_name: Name of the service type as specified in the service manifest. + :param type_name: Name of the service type as specified in the service + manifest. :type type_name: str :param manifest_version: The version of the service manifest. :type manifest_version: str - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param service_status: The status of the application. Possible values include: "Unknown", - "Active", "Upgrading", "Deleting", "Creating", "Failed". + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str """ _validation = { @@ -26161,61 +21971,53 @@ class StatelessServiceInfo(ServiceInfo): _attribute_map = { 'id': {'key': 'Id', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'name': {'key': 'Name', 'type': 'str'}, 'type_name': {'key': 'TypeName', 'type': 'str'}, 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } - def __init__( - self, - *, - id: Optional[str] = None, - name: Optional[str] = None, - type_name: Optional[str] = None, - manifest_version: Optional[str] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - service_status: Optional[Union[str, "ServiceStatus"]] = None, - is_service_group: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, **kwargs) -> None: super(StatelessServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group, **kwargs) - self.service_kind = 'Stateless' # type: str + self.service_kind = 'Stateless' class StatelessServiceInstanceHealth(ReplicaHealth): """Represents the health of the stateless service instance. -Contains the instance aggregated health state, the health events and the unhealthy evaluations. + Contains the instance aggregated health state, the health events and the + unhealthy evaluations. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The HealthState representing the aggregated health state of the - entity computed by Health Manager. 
- The health evaluation of the entity reflects all events reported on the entity and its - children (if any). - The aggregation is done by applying the desired health policy. Possible values include: - "Invalid", "Ok", "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. :type health_events: list[~azure.servicefabric.models.HealthEvent] - :param unhealthy_evaluations: The unhealthy evaluations that show why the current aggregated - health state was returned by Health Manager. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] - :param health_statistics: Shows the health statistics for all children types of the queried - entity. + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. :type health_statistics: ~azure.servicefabric.models.HealthStatistics - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind :param partition_id: Id of the partition to which this replica belongs. 
:type partition_id: str - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. :type instance_id: str """ @@ -26228,43 +22030,36 @@ class StatelessServiceInstanceHealth(ReplicaHealth): 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - health_events: Optional[List["HealthEvent"]] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - health_statistics: Optional["HealthStatistics"] = None, - partition_id: Optional[str] = None, - instance_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, instance_id: str=None, **kwargs) -> None: 
super(StatelessServiceInstanceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, partition_id=partition_id, **kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = instance_id + self.service_kind = 'Stateless' class StatelessServiceInstanceHealthState(ReplicaHealthState): - """Represents the health state of the stateless service instance, which contains the instance ID and the aggregated health state. + """Represents the health state of the stateless service instance, which + contains the instance ID and the aggregated health state. All required parameters must be populated in order to send to Azure. - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param partition_id: The ID of the partition to which this replica belongs. + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. :type partition_id: str - :param replica_id: Id of the stateless service instance on the wire this field is called - ReplicaId. + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + :param replica_id: Id of the stateless service instance on the wire this + field is called ReplicaId. :type replica_id: str """ @@ -26274,49 +22069,46 @@ class StatelessServiceInstanceHealthState(ReplicaHealthState): _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - partition_id: Optional[str] = None, - replica_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: super(StatelessServiceInstanceHealthState, self).__init__(aggregated_health_state=aggregated_health_state, partition_id=partition_id, **kwargs) - self.service_kind = 'Stateless' # type: str self.replica_id = replica_id + self.service_kind = 'Stateless' class StatelessServiceInstanceInfo(ReplicaInfo): - """Represents a stateless service instance. This includes information about the identity, status, health, node name, uptime, and other details about the instance. + """Represents a stateless service instance. This includes information about + the identity, status, health, node name, uptime, and other details about + the instance. All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param replica_status: The status of a replica of a service. Possible values include: - "Invalid", "InBuild", "Standby", "Ready", "Down", "Dropped". 
+ :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_name: The name of a Service Fabric node. :type node_name: str :param address: The address the replica is listening on. :type address: str - :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. :type last_in_build_duration_in_seconds: str - :param instance_id: Id of a stateless service instance. InstanceId is used by Service Fabric to - uniquely identify an instance of a partition of a stateless service. It is unique within a - partition and does not change for the lifetime of the instance. If the instance has failed over - on the same or different node, it will get a different value for the InstanceId. + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. 
:type instance_id: str """ @@ -26325,29 +22117,19 @@ class StatelessServiceInstanceInfo(ReplicaInfo): } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'address': {'key': 'Address', 'type': 'str'}, 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__( - self, - *, - replica_status: Optional[Union[str, "ReplicaStatus"]] = None, - health_state: Optional[Union[str, "HealthState"]] = None, - node_name: Optional[str] = None, - address: Optional[str] = None, - last_in_build_duration_in_seconds: Optional[str] = None, - instance_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, instance_id: str=None, **kwargs) -> None: super(StatelessServiceInstanceInfo, self).__init__(replica_status=replica_status, health_state=health_state, node_name=node_name, address=address, last_in_build_duration_in_seconds=last_in_build_duration_in_seconds, **kwargs) - self.service_kind = 'Stateless' # type: str self.instance_id = instance_id + self.service_kind = 'Stateless' class StatelessServicePartitionInfo(ServicePartitionInfo): @@ -26355,102 +22137,100 @@ class StatelessServicePartitionInfo(ServicePartitionInfo): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The kind of service (Stateless or Stateful).Constant filled by - server. Possible values include: "Invalid", "Stateless", "Stateful". 
- :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param health_state: The health state of a Service Fabric entity such as Cluster, Node, - Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState - :param partition_status: The status of the service fabric service partition. Possible values - include: "Invalid", "Ready", "NotReady", "InQuorumLoss", "Reconfiguring", "Deleting". - :type partition_status: str or ~azure.servicefabric.models.ServicePartitionStatus - :param partition_information: Information about the partition identity, partitioning scheme and - keys supported by it. - :type partition_information: ~azure.servicefabric.models.PartitionInformation + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param instance_count: Number of instances of this partition. :type instance_count: long - :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up - to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). 
- Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted - into the number of nodes on which the instances are allowed to be placed according to the - placement constraints on the service. + :param min_instance_count: MinInstanceCount is the minimum number of + instances that must be up to meet the EnsureAvailability safety check + during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation + -1 is first converted into the number of nodes on which the instances are + allowed to be placed according to the placement constraints on the + service. :type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum percentage of - InstanceCount that must be up to meet the EnsureAvailability safety check during operations - like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first - converted into the number of nodes on which the instances are allowed to be placed according to - the placement constraints on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum + percentage of InstanceCount that must be up to meet the EnsureAvailability + safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage + computation, -1 is first converted into the number of nodes on which the + instances are allowed to be placed according to the placement constraints + on the service. 
:type min_instance_percentage: int """ _validation = { 'service_kind': {'required': True}, - 'min_instance_count': {'minimum': 1}, - 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'health_state': {'key': 'HealthState', 'type': 'str'}, 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'long'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, } - def __init__( - self, - *, - health_state: Optional[Union[str, "HealthState"]] = None, - partition_status: Optional[Union[str, "ServicePartitionStatus"]] = None, - partition_information: Optional["PartitionInformation"] = None, - instance_count: Optional[int] = None, - min_instance_count: Optional[int] = 1, - min_instance_percentage: Optional[int] = 0, - **kwargs - ): + def __init__(self, *, health_state=None, partition_status=None, partition_information=None, instance_count: int=None, min_instance_count: int=None, min_instance_percentage: int=None, **kwargs) -> None: super(StatelessServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information, **kwargs) - self.service_kind = 'Stateless' # type: str self.instance_count = instance_count self.min_instance_count = min_instance_count self.min_instance_percentage = min_instance_percentage + self.service_kind = 'Stateless' class StatelessServiceTypeDescription(ServiceTypeDescription): - """Describes a stateless service type defined in the service manifest of a provisioned application type. + """Describes a stateless service type defined in the service manifest of a + provisioned application type. 
All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of service (Stateless or Stateful).Constant filled by server. - Possible values include: "Invalid", "Stateless", "Stateful". - :type kind: str or ~azure.servicefabric.models.ServiceKind - :param is_stateful: Indicates whether the service type is a stateful service type or a - stateless service type. This property is true if the service type is a stateful service type, - false otherwise. + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. :type is_stateful: bool - :param service_type_name: Name of the service type as specified in the service manifest. + :param service_type_name: Name of the service type as specified in the + service manifest. :type service_type_name: str - :param placement_constraints: The placement constraint to be used when instantiating this - service in a Service Fabric cluster. + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. :type placement_constraints: str :param load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] - :param service_placement_policies: List of service placement policy descriptions. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] :param extensions: List of service type extensions. 
- :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param use_implicit_host: A flag indicating if this type is not implemented and hosted by a - user service process, but is implicitly hosted by a system created process. This value is true - for services using the guest executable services, false otherwise. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. + :type kind: str + :param use_implicit_host: A flag indicating if this type is not + implemented and hosted by a user service process, but is implicitly hosted + by a system created process. This value is true for services using the + guest executable services, false otherwise. :type use_implicit_host: bool """ @@ -26459,31 +22239,20 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'use_implicit_host': {'key': 'UseImplicitHost', 'type': 'bool'}, } - def __init__( - self, - *, - is_stateful: Optional[bool] = None, - service_type_name: Optional[str] = None, - placement_constraints: Optional[str] = None, - load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - extensions: Optional[List["ServiceTypeExtensionDescription"]] = None, - use_implicit_host: Optional[bool] = None, - **kwargs - ): + def 
__init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, use_implicit_host: bool=None, **kwargs) -> None: super(StatelessServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, load_metrics=load_metrics, service_placement_policies=service_placement_policies, extensions=extensions, **kwargs) - self.kind = 'Stateless' # type: str self.use_implicit_host = use_implicit_host + self.kind = 'Stateless' class StatelessServiceUpdateDescription(ServiceUpdateDescription): @@ -26491,129 +22260,144 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): All required parameters must be populated in order to send to Azure. - :param service_kind: Required. The service kind.Constant filled by server. Possible values - include: "Invalid", "Stateless", "Stateful". - :type service_kind: str or ~azure.servicefabric.models.ServiceKind - :param flags: Flags indicating whether other properties are set. Each of the associated - properties corresponds to a flag, specified below, which, if set, indicate that the property is - specified. - This property can be a combination of those flags obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then the flags for ReplicaRestartWaitDuration (2) and - QuorumLossWaitDuration (4) are set. - - - * None - Does not indicate any other properties are set. The value is zero. - * TargetReplicaSetSize/InstanceCount - Indicates whether the TargetReplicaSetSize property - (for Stateful services) or the InstanceCount property (for Stateless services) is set. The - value is 1. - * ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration property is set. The - value is 2. - * QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property is set. The value is - 4. 
- * StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration property is set. The - value is 8. - * MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The value is 16. - * PlacementConstraints - Indicates the PlacementConstraints property is set. The value is 32. - * PlacementPolicyList - Indicates the ServicePlacementPolicies property is set. The value is - 64. - * Correlation - Indicates the CorrelationScheme property is set. The value is 128. - * Metrics - Indicates the ServiceLoadMetrics property is set. The value is 256. - * DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. - * ScalingPolicy - Indicates the ScalingPolicies property is set. The value is 1024. - * ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit property is set. The - value is 2048. - * MinInstanceCount - Indicates the MinInstanceCount property is set. The value is 4096. - * MinInstancePercentage - Indicates the MinInstancePercentage property is set. The value is - 8192. - * InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 16384. - * InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration property is set. The - value is 32768. - * DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property is set. The value - is 65536. - * ServiceDnsName - Indicates the ServiceDnsName property is set. The value is 131072. - * TagsForPlacement - Indicates the TagsForPlacement property is set. The value is 1048576. - * TagsForRunning - Indicates the TagsForRunning property is set. The value is 2097152. + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. 
+ For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + - ServicePlacementTimeLimit - Indicates the ServicePlacementTimeLimit + property is set. The value is 2048. + - MinInstanceCount - Indicates the MinInstanceCount property is set. The + value is 4096. + - MinInstancePercentage - Indicates the MinInstancePercentage property is + set. The value is 8192. + - InstanceCloseDelayDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 16384. + - InstanceRestartWaitDuration - Indicates the InstanceCloseDelayDuration + property is set. The value is 32768. + - DropSourceReplicaOnMove - Indicates the DropSourceReplicaOnMove property + is set. 
The value is 65536. + - ServiceDnsName - Indicates the ServiceDnsName property is set. The value + is 131072. + - TagsForPlacement - Indicates the TagsForPlacement property is set. The + value is 1048576. + - TagsForRunning - Indicates the TagsForRunning property is set. The value + is 2097152. :type flags: str - :param placement_constraints: The placement constraints as a string. Placement constraints are - boolean expressions on node properties and allow for restricting a service to particular nodes - based on the service requirements. For example, to place a service on nodes where NodeType is + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: The correlation scheme. - :type correlation_scheme: list[~azure.servicefabric.models.ServiceCorrelationDescription] + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] :param load_metrics: The service load metrics. - :type load_metrics: list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: The service placement policies. :type service_placement_policies: list[~azure.servicefabric.models.ServicePlacementPolicyDescription] - :param default_move_cost: The move cost for the service. Possible values include: "Zero", - "Low", "Medium", "High", "VeryHigh". + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High', 'VeryHigh' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost :param scaling_policies: Scaling policies for this service. 
- :type scaling_policies: list[~azure.servicefabric.models.ScalingPolicyDescription] + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] :param service_dns_name: The DNS name of the service. :type service_dns_name: str :param tags_for_placement: Tags for placement of this service. :type tags_for_placement: ~azure.servicefabric.models.NodeTagsDescription :param tags_for_running: Tags for running of this service. :type tags_for_running: ~azure.servicefabric.models.NodeTagsDescription + :param service_kind: Required. Constant filled by server. + :type service_kind: str :param instance_count: The instance count. :type instance_count: int - :param min_instance_count: MinInstanceCount is the minimum number of instances that must be up - to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted - into the number of nodes on which the instances are allowed to be placed according to the - placement constraints on the service. + :param min_instance_count: MinInstanceCount is the minimum number of + instances that must be up to meet the EnsureAvailability safety check + during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstanceCount computation + -1 is first converted into the number of nodes on which the instances are + allowed to be placed according to the placement constraints on the + service. 
:type min_instance_count: int - :param min_instance_percentage: MinInstancePercentage is the minimum percentage of - InstanceCount that must be up to meet the EnsureAvailability safety check during operations - like upgrade or deactivate node. - The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * - InstanceCount) ). - Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first - converted into the number of nodes on which the instances are allowed to be placed according to - the placement constraints on the service. + :param min_instance_percentage: MinInstancePercentage is the minimum + percentage of InstanceCount that must be up to meet the EnsureAvailability + safety check during operations like upgrade or deactivate node. + The actual number that is used is max( MinInstanceCount, ceil( + MinInstancePercentage/100.0 * InstanceCount) ). + Note, if InstanceCount is set to -1, during MinInstancePercentage + computation, -1 is first converted into the number of nodes on which the + instances are allowed to be placed according to the placement constraints + on the service. :type min_instance_percentage: int - :param instance_close_delay_duration_seconds: Duration in seconds, to wait before a stateless - instance is closed, to allow the active requests to drain gracefully. This would be effective - when the instance is closing during the application/cluster upgrade and disabling node. - The endpoint exposed on this instance is removed prior to starting the delay, which prevents - new connections to this instance. + :param instance_close_delay_duration_seconds: Duration in seconds, to wait + before a stateless instance is closed, to allow the active requests to + drain gracefully. This would be effective when the instance is closing + during the application/cluster upgrade and disabling node. 
+ The endpoint exposed on this instance is removed prior to starting the + delay, which prevents new connections to this instance. In addition, clients that have subscribed to service endpoint change events(https://docs.microsoft.com/dotnet/api/system.fabric.fabricclient.servicemanagementclient.registerservicenotificationfilterasync), can do the following upon receiving the endpoint removal notification: - - .. code-block:: - - - Stop sending new requests to this instance. - - Close existing connections after in-flight requests have completed. - - Connect to a different instance of the service partition for future requests. + - Stop sending new requests to this instance. + - Close existing connections after in-flight requests have completed. + - Connect to a different instance of the service partition for future + requests. :type instance_close_delay_duration_seconds: str - :param instance_lifecycle_description: Defines how instances of this service will behave during - their lifecycle. - :type instance_lifecycle_description: ~azure.servicefabric.models.InstanceLifecycleDescription - :param instance_restart_wait_duration_seconds: When a stateless instance goes down, this timer - starts. When it expires Service Fabric will create a new instance on any node in the cluster. - This configuration is to reduce unnecessary creation of a new instance in situations where the - instance going down is likely to recover in a short time. For example, during an upgrade. - The default value is 0, which indicates that when stateless instance goes down, Service Fabric - will immediately start building its replacement. + :param instance_lifecycle_description: Defines how instances of this + service will behave during their lifecycle. + :type instance_lifecycle_description: + ~azure.servicefabric.models.InstanceLifecycleDescription + :param instance_restart_wait_duration_seconds: When a stateless instance + goes down, this timer starts. 
When it expires Service Fabric will create a + new instance on any node in the cluster. + This configuration is to reduce unnecessary creation of a new instance in + situations where the instance going down is likely to recover in a short + time. For example, during an upgrade. + The default value is 0, which indicates that when stateless instance goes + down, Service Fabric will immediately start building its replacement. :type instance_restart_wait_duration_seconds: str """ _validation = { 'service_kind': {'required': True}, 'instance_count': {'minimum': -1}, - 'min_instance_count': {'minimum': 1}, - 'min_instance_percentage': {'maximum': 100, 'minimum': 0}, } _attribute_map = { - 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'flags': {'key': 'Flags', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, @@ -26624,6 +22408,7 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, 'tags_for_placement': {'key': 'TagsForPlacement', 'type': 'NodeTagsDescription'}, 'tags_for_running': {'key': 'TagsForRunning', 'type': 'NodeTagsDescription'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, 'min_instance_percentage': {'key': 'MinInstancePercentage', 'type': 'int'}, @@ -26632,74 +22417,48 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'instance_restart_wait_duration_seconds': {'key': 'InstanceRestartWaitDurationSeconds', 'type': 'str'}, } - def __init__( - self, - *, - flags: Optional[str] = None, - placement_constraints: Optional[str] = None, - correlation_scheme: Optional[List["ServiceCorrelationDescription"]] = None, - load_metrics: Optional[List["ServiceLoadMetricDescription"]] = None, - 
service_placement_policies: Optional[List["ServicePlacementPolicyDescription"]] = None, - default_move_cost: Optional[Union[str, "MoveCost"]] = None, - scaling_policies: Optional[List["ScalingPolicyDescription"]] = None, - service_dns_name: Optional[str] = None, - tags_for_placement: Optional["NodeTagsDescription"] = None, - tags_for_running: Optional["NodeTagsDescription"] = None, - instance_count: Optional[int] = None, - min_instance_count: Optional[int] = 1, - min_instance_percentage: Optional[int] = 0, - instance_close_delay_duration_seconds: Optional[str] = None, - instance_lifecycle_description: Optional["InstanceLifecycleDescription"] = None, - instance_restart_wait_duration_seconds: Optional[str] = None, - **kwargs - ): + def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, service_dns_name: str=None, tags_for_placement=None, tags_for_running=None, instance_count: int=None, min_instance_count: int=None, min_instance_percentage: int=None, instance_close_delay_duration_seconds: str=None, instance_lifecycle_description=None, instance_restart_wait_duration_seconds: str=None, **kwargs) -> None: super(StatelessServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, service_dns_name=service_dns_name, tags_for_placement=tags_for_placement, tags_for_running=tags_for_running, **kwargs) - self.service_kind = 'Stateless' # type: str self.instance_count = instance_count self.min_instance_count = min_instance_count self.min_instance_percentage = min_instance_percentage self.instance_close_delay_duration_seconds = instance_close_delay_duration_seconds self.instance_lifecycle_description = 
instance_lifecycle_description self.instance_restart_wait_duration_seconds = instance_restart_wait_duration_seconds + self.service_kind = 'Stateless' class StoppedChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up. + """Describes a Chaos event that gets generated when Chaos stops because either + the user issued a stop or the time to run was up. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why Chaos stopped. Chaos can stop because of StopChaos API call or the - timeToRun provided in ChaosParameters is over. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why Chaos stopped. Chaos can stop because of + StopChaos API call or the timeToRun provided in ChaosParameters is over. 
:type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - reason: Optional[str] = None, - **kwargs - ): + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: super(StoppedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) - self.kind = 'Stopped' # type: str self.reason = reason + self.kind = 'Stopped' class StringPropertyValue(PropertyValue): @@ -26707,10 +22466,8 @@ class StringPropertyValue(PropertyValue): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property, determined by the type of data. Following are the - possible values.Constant filled by server. Possible values include: "Invalid", "Binary", - "Int64", "Double", "String", "Guid". - :type kind: str or ~azure.servicefabric.models.PropertyValueKind + :param kind: Required. Constant filled by server. + :type kind: str :param data: Required. The data of the property value. :type data: str """ @@ -26725,30 +22482,24 @@ class StringPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__( - self, - *, - data: str, - **kwargs - ): + def __init__(self, *, data: str, **kwargs) -> None: super(StringPropertyValue, self).__init__(**kwargs) - self.kind = 'String' # type: str self.data = data + self.kind = 'String' class SuccessfulPropertyBatchInfo(PropertyBatchInfo): - """Derived from PropertyBatchInfo. Represents the property batch succeeding. Contains the results of any "Get" operations in the batch. + """Derived from PropertyBatchInfo. Represents the property batch succeeding. 
+ Contains the results of any "Get" operations in the batch. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of property batch info, determined by the results of a property - batch. The following are the possible values.Constant filled by server. Possible values - include: "Invalid", "Successful", "Failed". - :type kind: str or ~azure.servicefabric.models.PropertyBatchInfoKind - :param properties: A map containing the properties that were requested through any "Get" - property batch operations. The key represents the index of the "Get" operation in the original - request, in string form. The value is the property. If a property is not found, it will not be - in the map. + :param kind: Required. Constant filled by server. + :type kind: str + :param properties: A map containing the properties that were requested + through any "Get" property batch operations. The key represents the index + of the "Get" operation in the original request, in string form. The value + is the property. If a property is not found, it will not be in the map. :type properties: dict[str, ~azure.servicefabric.models.PropertyInfo] """ @@ -26761,43 +22512,37 @@ class SuccessfulPropertyBatchInfo(PropertyBatchInfo): 'properties': {'key': 'Properties', 'type': '{PropertyInfo}'}, } - def __init__( - self, - *, - properties: Optional[Dict[str, "PropertyInfo"]] = None, - **kwargs - ): + def __init__(self, *, properties=None, **kwargs) -> None: super(SuccessfulPropertyBatchInfo, self).__init__(**kwargs) - self.kind = 'Successful' # type: str self.properties = properties + self.kind = 'Successful' class SystemApplicationHealthEvaluation(HealthEvaluation): - """Represents health evaluation for the fabric:/System application, containing information about the data and the algorithm used by health store to evaluate health. The evaluation is returned only when the aggregated health state of the cluster is either Error or Warning. 
- - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for the fabric:/System application, containing + information about the data and the algorithm used by health store to + evaluate health. The evaluation is returned only when the aggregated health + state of the cluster is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated - health state of the system application. The types of the unhealthy evaluations can be - DeployedApplicationsHealthEvaluation, ServicesHealthEvaluation or EventHealthEvaluation. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param kind: Required. Constant filled by server. + :type kind: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the system application. The types + of the unhealthy evaluations can be DeployedApplicationsHealthEvaluation, + ServicesHealthEvaluation or EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -26805,36 +22550,30 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, unhealthy_evaluations=None, **kwargs) -> None: super(SystemApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'SystemApplication' # type: str self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'SystemApplication' -class TcpConfig(msrest.serialization.Model): +class TcpConfig(Model): """Describes the tcp configuration for external connectivity for this network. All required parameters must be populated in order to send to Azure. :param name: Required. tcp gateway config name. :type name: str - :param port: Required. Specifies the port at which the service endpoint below needs to be - exposed. + :param port: Required. Specifies the port at which the service endpoint + below needs to be exposed. :type port: int - :param destination: Required. Describes destination endpoint for routing traffic. + :param destination: Required. Describes destination endpoint for routing + traffic. 
:type destination: ~azure.servicefabric.models.GatewayDestination """ @@ -26850,14 +22589,7 @@ class TcpConfig(msrest.serialization.Model): 'destination': {'key': 'destination', 'type': 'GatewayDestination'}, } - def __init__( - self, - *, - name: str, - port: int, - destination: "GatewayDestination", - **kwargs - ): + def __init__(self, *, name: str, port: int, destination, **kwargs) -> None: super(TcpConfig, self).__init__(**kwargs) self.name = name self.port = port @@ -26865,44 +22597,41 @@ def __init__( class TestErrorChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when an unexpected event occurs in the Chaos engine. -For example, due to the cluster snapshot being inconsistent, while faulting an entity, Chaos found that the entity was already faulted -- which would be an unexpected event. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why TestErrorChaosEvent was generated. For example, Chaos tries to - fault a partition but finds that the partition is no longer fault tolerant, then a - TestErrorEvent gets generated with the reason stating that the partition is not fault tolerant. + """Describes a Chaos event that gets generated when an unexpected event occurs + in the Chaos engine. + For example, due to the cluster snapshot being inconsistent, while faulting + an entity, Chaos found that the entity was already faulted -- which would + be an unexpected event. + + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. 
The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why TestErrorChaosEvent was generated. For + example, Chaos tries to fault a partition but finds that the partition is + no longer fault tolerant, then a TestErrorEvent gets generated with the + reason stating that the partition is not fault tolerant. :type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - reason: Optional[str] = None, - **kwargs - ): + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: super(TestErrorChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) - self.kind = 'TestError' # type: str self.reason = reason + self.kind = 'TestError' class TimeBasedBackupScheduleDescription(BackupScheduleDescription): @@ -26910,20 +22639,21 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): All required parameters must be populated in order to send to Azure. - :param schedule_kind: Required. The kind of backup schedule, time based or frequency - based.Constant filled by server. Possible values include: "Invalid", "TimeBased", - "FrequencyBased". - :type schedule_kind: str or ~azure.servicefabric.models.BackupScheduleKind - :param schedule_frequency_type: Required. Describes the frequency with which to run the time - based backup schedule. Possible values include: "Invalid", "Daily", "Weekly". - :type schedule_frequency_type: str or ~azure.servicefabric.models.BackupScheduleFrequencyType - :param run_days: List of days of a week when to trigger the periodic backup. 
This is valid only - when the backup schedule frequency type is weekly. + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str + :param schedule_frequency_type: Required. Describes the frequency with + which to run the time based backup schedule. Possible values include: + 'Invalid', 'Daily', 'Weekly' + :type schedule_frequency_type: str or + ~azure.servicefabric.models.BackupScheduleFrequencyType + :param run_days: List of days of a week when to trigger the periodic + backup. This is valid only when the backup schedule frequency type is + weekly. :type run_days: list[str or ~azure.servicefabric.models.DayOfWeek] - :param run_times: Required. Represents the list of exact time during the day in ISO8601 format. - Like '19:00:00' will represent '7PM' during the day. Date specified along with time will be - ignored. - :type run_times: list[~datetime.datetime] + :param run_times: Required. Represents the list of exact time during the + day in ISO8601 format. Like '19:00:00' will represent '7PM' during the + day. Date specified along with time will be ignored. 
+ :type run_times: list[datetime] """ _validation = { @@ -26939,27 +22669,22 @@ class TimeBasedBackupScheduleDescription(BackupScheduleDescription): 'run_times': {'key': 'RunTimes', 'type': '[iso-8601]'}, } - def __init__( - self, - *, - schedule_frequency_type: Union[str, "BackupScheduleFrequencyType"], - run_times: List[datetime.datetime], - run_days: Optional[List[Union[str, "DayOfWeek"]]] = None, - **kwargs - ): + def __init__(self, *, schedule_frequency_type, run_times, run_days=None, **kwargs) -> None: super(TimeBasedBackupScheduleDescription, self).__init__(**kwargs) - self.schedule_kind = 'TimeBased' # type: str self.schedule_frequency_type = schedule_frequency_type self.run_days = run_days self.run_times = run_times + self.schedule_kind = 'TimeBased' -class TimeOfDay(msrest.serialization.Model): +class TimeOfDay(Model): """Defines an hour and minute of the day specified in 24 hour time. - :param hour: Represents the hour of the day. Value must be between 0 and 23 inclusive. + :param hour: Represents the hour of the day. Value must be between 0 and + 23 inclusive. :type hour: int - :param minute: Represents the minute of the hour. Value must be between 0 to 59 inclusive. + :param minute: Represents the minute of the hour. Value must be between 0 + to 59 inclusive. :type minute: int """ @@ -26973,24 +22698,20 @@ class TimeOfDay(msrest.serialization.Model): 'minute': {'key': 'Minute', 'type': 'int'}, } - def __init__( - self, - *, - hour: Optional[int] = None, - minute: Optional[int] = None, - **kwargs - ): + def __init__(self, *, hour: int=None, minute: int=None, **kwargs) -> None: super(TimeOfDay, self).__init__(**kwargs) self.hour = hour self.minute = minute -class TimeRange(msrest.serialization.Model): +class TimeRange(Model): """Defines a time range in a 24 hour day specified by a start and end time. - :param start_time: Defines an hour and minute of the day specified in 24 hour time. 
+ :param start_time: Defines an hour and minute of the day specified in 24 + hour time. :type start_time: ~azure.servicefabric.models.TimeOfDay - :param end_time: Defines an hour and minute of the day specified in 24 hour time. + :param end_time: Defines an hour and minute of the day specified in 24 + hour time. :type end_time: ~azure.servicefabric.models.TimeOfDay """ @@ -26999,32 +22720,28 @@ class TimeRange(msrest.serialization.Model): 'end_time': {'key': 'EndTime', 'type': 'TimeOfDay'}, } - def __init__( - self, - *, - start_time: Optional["TimeOfDay"] = None, - end_time: Optional["TimeOfDay"] = None, - **kwargs - ): + def __init__(self, *, start_time=None, end_time=None, **kwargs) -> None: super(TimeRange, self).__init__(**kwargs) self.start_time = start_time self.end_time = end_time class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): - """Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions. + """Describes a partitioning scheme where an integer range is allocated evenly + across a number of partitions. All required parameters must be populated in order to send to Azure. - :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by - server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". - :type partition_scheme: str or ~azure.servicefabric.models.PartitionScheme + :param partition_scheme: Required. Constant filled by server. + :type partition_scheme: str :param count: Required. The number of partitions. :type count: int - :param low_key: Required. String indicating the lower bound of the partition key range that + :param low_key: Required. String indicating the lower bound of the + partition key range that should be split between the partitions. :type low_key: str - :param high_key: Required. String indicating the upper bound of the partition key range that + :param high_key: Required. 
String indicating the upper bound of the + partition key range that should be split between the partitions. :type high_key: str """ @@ -27043,29 +22760,23 @@ class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__( - self, - *, - count: int, - low_key: str, - high_key: str, - **kwargs - ): + def __init__(self, *, count: int, low_key: str, high_key: str, **kwargs) -> None: super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs) - self.partition_scheme = 'UniformInt64Range' # type: str self.count = count self.low_key = low_key self.high_key = high_key + self.partition_scheme = 'UniformInt64Range' -class UnplacedReplicaInformation(msrest.serialization.Model): +class UnplacedReplicaInformation(Model): """Contains information for an unplaced replica. :param service_name: The name of the service. :type service_name: str :param partition_id: The ID of the partition. :type partition_id: str - :param unplaced_replica_details: List of reasons due to which a replica cannot be placed. + :param unplaced_replica_details: List of reasons due to which a replica + cannot be placed. 
:type unplaced_replica_details: list[str] """ @@ -27075,32 +22786,27 @@ class UnplacedReplicaInformation(msrest.serialization.Model): 'unplaced_replica_details': {'key': 'UnplacedReplicaDetails', 'type': '[str]'}, } - def __init__( - self, - *, - service_name: Optional[str] = None, - partition_id: Optional[str] = None, - unplaced_replica_details: Optional[List[str]] = None, - **kwargs - ): + def __init__(self, *, service_name: str=None, partition_id: str=None, unplaced_replica_details=None, **kwargs) -> None: super(UnplacedReplicaInformation, self).__init__(**kwargs) self.service_name = service_name self.partition_id = partition_id self.unplaced_replica_details = unplaced_replica_details -class UnprovisionApplicationTypeDescriptionInfo(msrest.serialization.Model): - """Describes the operation to unregister or unprovision an application type and its version that was registered with the Service Fabric. +class UnprovisionApplicationTypeDescriptionInfo(Model): + """Describes the operation to unregister or unprovision an application type + and its version that was registered with the Service Fabric. All required parameters must be populated in order to send to Azure. - :param application_type_version: Required. The version of the application type as defined in - the application manifest. + :param application_type_version: Required. The version of the application + type as defined in the application manifest. :type application_type_version: str - :param async_property: The flag indicating whether or not unprovision should occur - asynchronously. When set to true, the unprovision operation returns when the request is - accepted by the system, and the unprovision operation continues without any timeout limit. The - default value is false. However, we recommend setting it to true for large application packages + :param async_property: The flag indicating whether or not unprovision + should occur asynchronously. 
When set to true, the unprovision operation + returns when the request is accepted by the system, and the unprovision + operation continues without any timeout limit. The default value is false. + However, we recommend setting it to true for large application packages that were provisioned. :type async_property: bool """ @@ -27114,19 +22820,13 @@ class UnprovisionApplicationTypeDescriptionInfo(msrest.serialization.Model): 'async_property': {'key': 'Async', 'type': 'bool'}, } - def __init__( - self, - *, - application_type_version: str, - async_property: Optional[bool] = None, - **kwargs - ): + def __init__(self, *, application_type_version: str, async_property: bool=None, **kwargs) -> None: super(UnprovisionApplicationTypeDescriptionInfo, self).__init__(**kwargs) self.application_type_version = application_type_version self.async_property = async_property -class UnprovisionFabricDescription(msrest.serialization.Model): +class UnprovisionFabricDescription(Model): """Describes the parameters for unprovisioning a cluster. :param code_version: The cluster code package version. @@ -27140,40 +22840,40 @@ class UnprovisionFabricDescription(msrest.serialization.Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__( - self, - *, - code_version: Optional[str] = None, - config_version: Optional[str] = None, - **kwargs - ): + def __init__(self, *, code_version: str=None, config_version: str=None, **kwargs) -> None: super(UnprovisionFabricDescription, self).__init__(**kwargs) self.code_version = code_version self.config_version = config_version -class UpdateClusterUpgradeDescription(msrest.serialization.Model): +class UpdateClusterUpgradeDescription(Model): """Parameters for updating a cluster upgrade. - :param upgrade_kind: The type of upgrade out of the following possible values. Possible values - include: "Invalid", "Rolling", "Rolling_ForceRestart". Default value: "Rolling". 
+ :param upgrade_kind: The type of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling', + 'Rolling_ForceRestart'. Default value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeType - :param update_description: Describes the parameters for updating a rolling upgrade of - application or cluster. - :type update_description: ~azure.servicefabric.models.RollingUpgradeUpdateDescription - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than - absolute health evaluation after completion of each upgrade domain. + :param update_description: Describes the parameters for updating a rolling + upgrade of application or cluster. + :type update_description: + ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. :type enable_delta_health_evaluation: bool - :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of - the cluster during a cluster upgrade. + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject - :param application_health_policy_map: Defines the application health policy map used to - evaluate the health of an application or one of its children entities. 
- :type application_health_policy_map: ~azure.servicefabric.models.ApplicationHealthPolicies + :param application_health_policy_map: Defines the application health + policy map used to evaluate the health of an application or one of its + children entities. + :type application_health_policy_map: + ~azure.servicefabric.models.ApplicationHealthPolicies """ _attribute_map = { @@ -27185,17 +22885,7 @@ class UpdateClusterUpgradeDescription(msrest.serialization.Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, } - def __init__( - self, - *, - upgrade_kind: Optional[Union[str, "UpgradeType"]] = "Rolling", - update_description: Optional["RollingUpgradeUpdateDescription"] = None, - cluster_health_policy: Optional["ClusterHealthPolicy"] = None, - enable_delta_health_evaluation: Optional[bool] = None, - cluster_upgrade_health_policy: Optional["ClusterUpgradeHealthPolicyObject"] = None, - application_health_policy_map: Optional["ApplicationHealthPolicies"] = None, - **kwargs - ): + def __init__(self, *, upgrade_kind="Rolling", update_description=None, cluster_health_policy=None, enable_delta_health_evaluation: bool=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None: super(UpdateClusterUpgradeDescription, self).__init__(**kwargs) self.upgrade_kind = upgrade_kind self.update_description = update_description @@ -27205,13 +22895,15 @@ def __init__( self.application_health_policy_map = application_health_policy_map -class UpdatePartitionLoadResult(msrest.serialization.Model): - """Specifies result of updating load for specified partitions. The output will be ordered based on the partition ID. +class UpdatePartitionLoadResult(Model): + """Specifies result of updating load for specified partitions. The output will + be ordered based on the partition ID. :param partition_id: Id of the partition. 
:type partition_id: str - :param partition_error_code: If OperationState is Completed - this is 0. If OperationState is - Faulted - this is an error code indicating the reason. + :param partition_error_code: If OperationState is Completed - this is 0. + If OperationState is Faulted - this is an error code indicating the + reason. :type partition_error_code: int """ @@ -27220,58 +22912,53 @@ class UpdatePartitionLoadResult(msrest.serialization.Model): 'partition_error_code': {'key': 'PartitionErrorCode', 'type': 'int'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - partition_error_code: Optional[int] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, partition_error_code: int=None, **kwargs) -> None: super(UpdatePartitionLoadResult, self).__init__(**kwargs) self.partition_id = partition_id self.partition_error_code = partition_error_code class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): - """Represents health evaluation for delta unhealthy cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. -Can be returned during cluster upgrade when cluster aggregated health state is Warning or Error. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for delta unhealthy cluster nodes in an + upgrade domain, containing health evaluations for each unhealthy node that + impacted current aggregated health state. + Can be returned during cluster upgrade when cluster aggregated health state + is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently - evaluated. + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param upgrade_domain_name: Name of the upgrade domain where nodes health + is currently evaluated. :type upgrade_domain_name: str - :param baseline_error_count: Number of upgrade domain nodes with aggregated heath state Error - in the health store at the beginning of the cluster upgrade. + :param baseline_error_count: Number of upgrade domain nodes with + aggregated heath state Error in the health store at the beginning of the + cluster upgrade. :type baseline_error_count: long - :param baseline_total_count: Total number of upgrade domain nodes in the health store at the - beginning of the cluster upgrade. + :param baseline_total_count: Total number of upgrade domain nodes in the + health store at the beginning of the cluster upgrade. :type baseline_total_count: long - :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of upgrade domain delta - unhealthy nodes from the ClusterUpgradeHealthPolicy. + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of + upgrade domain delta unhealthy nodes from the ClusterUpgradeHealthPolicy. :type max_percent_delta_unhealthy_nodes: int - :param total_count: Total number of upgrade domain nodes in the health store. + :param total_count: Total number of upgrade domain nodes in the health + store. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -27279,9 +22966,9 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, @@ -27290,36 +22977,24 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - upgrade_domain_name: Optional[str] = None, - baseline_error_count: Optional[int] = None, - baseline_total_count: Optional[int] = None, - max_percent_delta_unhealthy_nodes: Optional[int] = None, - total_count: Optional[int] = None, - unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, upgrade_domain_name: str=None, baseline_error_count: int=None, baseline_total_count: int=None, max_percent_delta_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(UpgradeDomainDeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'UpgradeDomainDeltaNodesCheck' # type: str self.upgrade_domain_name = upgrade_domain_name self.baseline_error_count = baseline_error_count self.baseline_total_count = baseline_total_count self.max_percent_delta_unhealthy_nodes = 
max_percent_delta_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'UpgradeDomainDeltaNodesCheck' -class UpgradeDomainInfo(msrest.serialization.Model): +class UpgradeDomainInfo(Model): """Information about an upgrade domain. - :param name: The name of the upgrade domain. + :param name: The name of the upgrade domain :type name: str - :param state: The state of the upgrade domain. Possible values include: "Invalid", "Pending", - "InProgress", "Completed". + :param state: The state of the upgrade domain. Possible values include: + 'Invalid', 'Pending', 'InProgress', 'Completed' :type state: str or ~azure.servicefabric.models.UpgradeDomainState """ @@ -27328,51 +23003,45 @@ class UpgradeDomainInfo(msrest.serialization.Model): 'state': {'key': 'State', 'type': 'str'}, } - def __init__( - self, - *, - name: Optional[str] = None, - state: Optional[Union[str, "UpgradeDomainState"]] = None, - **kwargs - ): + def __init__(self, *, name: str=None, state=None, **kwargs) -> None: super(UpgradeDomainInfo, self).__init__(**kwargs) self.name = name self.state = state class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): - """Represents health evaluation for cluster nodes in an upgrade domain, containing health evaluations for each unhealthy node that impacted current aggregated health state. Can be returned when evaluating cluster health during cluster upgrade and the aggregated health state is either Error or Warning. - - All required parameters must be populated in order to send to Azure. - - :param kind: Required. The health manager in the cluster performs health evaluations in - determining the aggregated health state of an entity. This enumeration provides information on - the kind of evaluation that was performed. Following are the possible values.Constant filled by - server. 
Possible values include: "Invalid", "Event", "Replicas", "Partitions", - "DeployedServicePackages", "DeployedApplications", "Services", "Nodes", "Applications", - "SystemApplication", "UpgradeDomainDeployedApplications", "UpgradeDomainNodes", "Replica", - "Partition", "DeployedServicePackage", "DeployedApplication", "Service", "Node", "Application", - "DeltaNodesCheck", "UpgradeDomainDeltaNodesCheck", "ApplicationTypeApplications", - "NodeTypeNodes". - :type kind: str or ~azure.servicefabric.models.HealthEvaluationKind - :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, - Node, Application, Service, Partition, Replica etc. Possible values include: "Invalid", "Ok", - "Warning", "Error", "Unknown". - :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState - :param description: Description of the health evaluation, which represents a summary of the - evaluation process. + """Represents health evaluation for cluster nodes in an upgrade domain, + containing health evaluations for each unhealthy node that impacted current + aggregated health state. Can be returned when evaluating cluster health + during cluster upgrade and the aggregated health state is either Error or + Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. :type description: str - :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently - evaluated. + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param upgrade_domain_name: Name of the upgrade domain where nodes health + is currently evaluated. :type upgrade_domain_name: str - :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the - ClusterHealthPolicy. + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes from the ClusterHealthPolicy. :type max_percent_unhealthy_nodes: int :param total_count: Total number of nodes in the current upgrade domain. :type total_count: long - :param unhealthy_evaluations: List of unhealthy evaluations that led to the aggregated health - state. Includes all the unhealthy NodeHealthEvaluation that impacted the aggregated health. - :type unhealthy_evaluations: list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] """ _validation = { @@ -27380,38 +23049,29 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, 'total_count': {'key': 'TotalCount', 'type': 'long'}, 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__( - self, - *, - aggregated_health_state: Optional[Union[str, "HealthState"]] = None, - description: Optional[str] = None, - upgrade_domain_name: Optional[str] = None, - max_percent_unhealthy_nodes: Optional[int] = None, - total_count: Optional[int] = None, - 
unhealthy_evaluations: Optional[List["HealthEvaluationWrapper"]] = None, - **kwargs - ): + def __init__(self, *, aggregated_health_state=None, description: str=None, upgrade_domain_name: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: super(UpgradeDomainNodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) - self.kind = 'UpgradeDomainNodes' # type: str self.upgrade_domain_name = upgrade_domain_name self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes self.total_count = total_count self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'UpgradeDomainNodes' -class UpgradeOrchestrationServiceState(msrest.serialization.Model): +class UpgradeOrchestrationServiceState(Model): """Service state of Service Fabric Upgrade Orchestration Service. - :param service_state: The state of Service Fabric Upgrade Orchestration Service. + :param service_state: The state of Service Fabric Upgrade Orchestration + Service. :type service_state: str """ @@ -27419,28 +23079,26 @@ class UpgradeOrchestrationServiceState(msrest.serialization.Model): 'service_state': {'key': 'ServiceState', 'type': 'str'}, } - def __init__( - self, - *, - service_state: Optional[str] = None, - **kwargs - ): + def __init__(self, *, service_state: str=None, **kwargs) -> None: super(UpgradeOrchestrationServiceState, self).__init__(**kwargs) self.service_state = service_state -class UpgradeOrchestrationServiceStateSummary(msrest.serialization.Model): +class UpgradeOrchestrationServiceStateSummary(Model): """Service state summary of Service Fabric Upgrade Orchestration Service. :param current_code_version: The current code version of the cluster. :type current_code_version: str - :param current_manifest_version: The current manifest version of the cluster. + :param current_manifest_version: The current manifest version of the + cluster. 
:type current_manifest_version: str :param target_code_version: The target code version of the cluster. :type target_code_version: str - :param target_manifest_version: The target manifest version of the cluster. + :param target_manifest_version: The target manifest version of the + cluster. :type target_manifest_version: str - :param pending_upgrade_type: The type of the pending upgrade of the cluster. + :param pending_upgrade_type: The type of the pending upgrade of the + cluster. :type pending_upgrade_type: str """ @@ -27452,16 +23110,7 @@ class UpgradeOrchestrationServiceStateSummary(msrest.serialization.Model): 'pending_upgrade_type': {'key': 'PendingUpgradeType', 'type': 'str'}, } - def __init__( - self, - *, - current_code_version: Optional[str] = None, - current_manifest_version: Optional[str] = None, - target_code_version: Optional[str] = None, - target_manifest_version: Optional[str] = None, - pending_upgrade_type: Optional[str] = None, - **kwargs - ): + def __init__(self, *, current_code_version: str=None, current_manifest_version: str=None, target_code_version: str=None, target_manifest_version: str=None, pending_upgrade_type: str=None, **kwargs) -> None: super(UpgradeOrchestrationServiceStateSummary, self).__init__(**kwargs) self.current_code_version = current_code_version self.current_manifest_version = current_manifest_version @@ -27470,14 +23119,14 @@ def __init__( self.pending_upgrade_type = pending_upgrade_type -class UploadChunkRange(msrest.serialization.Model): +class UploadChunkRange(Model): """Information about which portion of the file to upload. - :param start_position: The start position of the portion of the file. It's represented by the - number of bytes. + :param start_position: The start position of the portion of the file. It's + represented by the number of bytes. :type start_position: str - :param end_position: The end position of the portion of the file. It's represented by the - number of bytes. 
+ :param end_position: The end position of the portion of the file. It's + represented by the number of bytes. :type end_position: str """ @@ -27486,24 +23135,19 @@ class UploadChunkRange(msrest.serialization.Model): 'end_position': {'key': 'EndPosition', 'type': 'str'}, } - def __init__( - self, - *, - start_position: Optional[str] = None, - end_position: Optional[str] = None, - **kwargs - ): + def __init__(self, *, start_position: str=None, end_position: str=None, **kwargs) -> None: super(UploadChunkRange, self).__init__(**kwargs) self.start_position = start_position self.end_position = end_position -class UploadSession(msrest.serialization.Model): +class UploadSession(Model): """Information about a image store upload session. - :param upload_sessions: When querying upload session by upload session ID, the result contains - only one upload session. When querying upload session by image store relative path, the result - might contain multiple upload sessions. + :param upload_sessions: When querying upload session by upload session ID, + the result contains only one upload session. When querying upload session + by image store relative path, the result might contain multiple upload + sessions. :type upload_sessions: list[~azure.servicefabric.models.UploadSessionInfo] """ @@ -27511,30 +23155,28 @@ class UploadSession(msrest.serialization.Model): 'upload_sessions': {'key': 'UploadSessions', 'type': '[UploadSessionInfo]'}, } - def __init__( - self, - *, - upload_sessions: Optional[List["UploadSessionInfo"]] = None, - **kwargs - ): + def __init__(self, *, upload_sessions=None, **kwargs) -> None: super(UploadSession, self).__init__(**kwargs) self.upload_sessions = upload_sessions -class UploadSessionInfo(msrest.serialization.Model): - """Information about an image store upload session. A session is associated with a relative path in the image store. +class UploadSessionInfo(Model): + """Information about an image store upload session. 
A session is associated + with a relative path in the image store. - :param store_relative_path: The remote location within image store. This path is relative to - the image store root. + :param store_relative_path: The remote location within image store. This + path is relative to the image store root. :type store_relative_path: str - :param session_id: A unique ID of the upload session. A session ID can be reused only if the - session was committed or removed. + :param session_id: A unique ID of the upload session. A session ID can be + reused only if the session was committed or removed. :type session_id: str - :param modified_date: The date and time when the upload session was last modified. - :type modified_date: ~datetime.datetime + :param modified_date: The date and time when the upload session was last + modified. + :type modified_date: datetime :param file_size: The size in bytes of the uploading file. :type file_size: str - :param expected_ranges: List of chunk ranges that image store has not received yet. + :param expected_ranges: List of chunk ranges that image store has not + received yet. 
:type expected_ranges: list[~azure.servicefabric.models.UploadChunkRange] """ @@ -27546,16 +23188,7 @@ class UploadSessionInfo(msrest.serialization.Model): 'expected_ranges': {'key': 'ExpectedRanges', 'type': '[UploadChunkRange]'}, } - def __init__( - self, - *, - store_relative_path: Optional[str] = None, - session_id: Optional[str] = None, - modified_date: Optional[datetime.datetime] = None, - file_size: Optional[str] = None, - expected_ranges: Optional[List["UploadChunkRange"]] = None, - **kwargs - ): + def __init__(self, *, store_relative_path: str=None, session_id: str=None, modified_date=None, file_size: str=None, expected_ranges=None, **kwargs) -> None: super(UploadSessionInfo, self).__init__(**kwargs) self.store_relative_path = store_relative_path self.session_id = session_id @@ -27564,12 +23197,13 @@ def __init__( self.expected_ranges = expected_ranges -class UsageInfo(msrest.serialization.Model): - """Information about how much space and how many files in the file system the ImageStore is using in this category. +class UsageInfo(Model): + """Information about how much space and how many files in the file system the + ImageStore is using in this category. - :param used_space: the size of all files in this category. + :param used_space: the size of all files in this category :type used_space: str - :param file_count: the number of all files in this category. 
+ :param file_count: the number of all files in this category :type file_count: str """ @@ -27578,13 +23212,7 @@ class UsageInfo(msrest.serialization.Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__( - self, - *, - used_space: Optional[str] = None, - file_count: Optional[str] = None, - **kwargs - ): + def __init__(self, *, used_space: str=None, file_count: str=None, **kwargs) -> None: super(UsageInfo, self).__init__(**kwargs) self.used_space = used_space self.file_count = file_count @@ -27595,53 +23223,48 @@ class ValidationFailedChaosEvent(ChaosEvent): All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why the ValidationFailedChaosEvent was generated. This may happen - because more than MaxPercentUnhealthyNodes are unhealthy for more than - MaxClusterStabilizationTimeout. This reason will be in the Reason property of the - ValidationFailedChaosEvent as a string. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why the ValidationFailedChaosEvent was generated. + This may happen because more than MaxPercentUnhealthyNodes are unhealthy + for more than MaxClusterStabilizationTimeout. This reason will be in the + Reason property of the ValidationFailedChaosEvent as a string. 
:type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - reason: Optional[str] = None, - **kwargs - ): + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: super(ValidationFailedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) - self.kind = 'ValidationFailed' # type: str self.reason = reason + self.kind = 'ValidationFailed' -class VolumeProviderParametersAzureFile(msrest.serialization.Model): +class VolumeProviderParametersAzureFile(Model): """This type describes a volume provided by an Azure Files file share. All required parameters must be populated in order to send to Azure. - :param account_name: Required. Name of the Azure storage account for the File Share. + :param account_name: Required. Name of the Azure storage account for the + File Share. :type account_name: str - :param account_key: Access key of the Azure storage account for the File Share. + :param account_key: Access key of the Azure storage account for the File + Share. :type account_key: str - :param share_name: Required. Name of the Azure Files file share that provides storage for the - volume. + :param share_name: Required. Name of the Azure Files file share that + provides storage for the volume. 
:type share_name: str """ @@ -27656,24 +23279,18 @@ class VolumeProviderParametersAzureFile(msrest.serialization.Model): 'share_name': {'key': 'shareName', 'type': 'str'}, } - def __init__( - self, - *, - account_name: str, - share_name: str, - account_key: Optional[str] = None, - **kwargs - ): + def __init__(self, *, account_name: str, share_name: str, account_key: str=None, **kwargs) -> None: super(VolumeProviderParametersAzureFile, self).__init__(**kwargs) self.account_name = account_name self.account_key = account_key self.share_name = share_name -class VolumeResourceDescription(msrest.serialization.Model): +class VolumeResourceDescription(Model): """This type describes a volume resource. - Variables are only populated by the server, and will be ignored when sending a request. + Variables are only populated by the server, and will be ignored when + sending a request. All required parameters must be populated in order to send to Azure. @@ -27681,23 +23298,26 @@ class VolumeResourceDescription(msrest.serialization.Model): :type name: str :param description: User readable description of the volume. :type description: str - :ivar status: Status of the volume. Possible values include: "Unknown", "Ready", "Upgrading", - "Creating", "Deleting", "Failed". + :ivar status: Status of the volume. Possible values include: 'Unknown', + 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :vartype status: str or ~azure.servicefabric.models.ResourceStatus - :ivar status_details: Gives additional information about the current status of the volume. + :ivar status_details: Gives additional information about the current + status of the volume. :vartype status_details: str - :param provider: Required. Provider of the volume. Possible values include: "SFAzureFile". - :type provider: str or ~azure.servicefabric.models.VolumeProvider - :param azure_file_parameters: This type describes a volume provided by an Azure Files file - share. 
- :type azure_file_parameters: ~azure.servicefabric.models.VolumeProviderParametersAzureFile + :ivar provider: Required. Provider of the volume. Default value: + "SFAzureFile" . + :vartype provider: str + :param azure_file_parameters: This type describes a volume provided by an + Azure Files file share. + :type azure_file_parameters: + ~azure.servicefabric.models.VolumeProviderParametersAzureFile """ _validation = { 'name': {'required': True}, 'status': {'readonly': True}, 'status_details': {'readonly': True}, - 'provider': {'required': True}, + 'provider': {'required': True, 'constant': True}, } _attribute_map = { @@ -27709,36 +23329,29 @@ class VolumeResourceDescription(msrest.serialization.Model): 'azure_file_parameters': {'key': 'properties.azureFileParameters', 'type': 'VolumeProviderParametersAzureFile'}, } - def __init__( - self, - *, - name: str, - provider: Union[str, "VolumeProvider"], - description: Optional[str] = None, - azure_file_parameters: Optional["VolumeProviderParametersAzureFile"] = None, - **kwargs - ): + provider = "SFAzureFile" + + def __init__(self, *, name: str, description: str=None, azure_file_parameters=None, **kwargs) -> None: super(VolumeResourceDescription, self).__init__(**kwargs) self.name = name self.description = description self.status = None self.status_details = None - self.provider = provider self.azure_file_parameters = azure_file_parameters class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the replica build operation to finish. This indicates that there is a replica that is going through the copy or is providing data for building another replica. Bring the node down will abort this copy operation which are typically expensive involving data movements. + """Safety check that waits for the replica build operation to finish. This + indicates that there is a replica that is going through the copy or is + providing data for building another replica. 
Bring the node down will abort + this copy operation which are typically expensive involving data movements. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -27751,28 +23364,21 @@ class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(WaitForInbuildReplicaSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForInbuildReplica' # type: str + self.kind = 'WaitForInbuildReplica' class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica that was moved out of the node due to upgrade to be placed back again on that node. + """Safety check that waits for the primary replica that was moved out of the + node due to upgrade to be placed back again on that node. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. 
These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -27785,28 +23391,22 @@ class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForPrimaryPlacement' # type: str + self.kind = 'WaitForPrimaryPlacement' class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the primary replica to be moved out of the node before starting an upgrade to ensure the availability of the primary replica for the partition. + """Safety check that waits for the primary replica to be moved out of the node + before starting an upgrade to ensure the availability of the primary + replica for the partition. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. 
Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". - :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -27819,28 +23419,21 @@ class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(WaitForPrimarySwapSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForPrimarySwap' # type: str + self.kind = 'WaitForPrimarySwap' class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): - """Safety check that waits for the current reconfiguration of the partition to be completed before starting an upgrade. + """Safety check that waits for the current reconfiguration of the partition to + be completed before starting an upgrade. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of safety check performed by service fabric before continuing - with the operations. These checks ensure the availability of the service and the reliability of - the state. Following are the kinds of safety checks.Constant filled by server. Possible values - include: "Invalid", "EnsureSeedNodeQuorum", "EnsurePartitionQuorum", "WaitForPrimaryPlacement", - "WaitForPrimarySwap", "WaitForReconfiguration", "WaitForInbuildReplica", "EnsureAvailability". 
- :type kind: str or ~azure.servicefabric.models.SafetyCheckKind - :param partition_id: Id of the partition which is undergoing the safety check. + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. :type partition_id: str """ @@ -27853,50 +23446,40 @@ class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__( - self, - *, - partition_id: Optional[str] = None, - **kwargs - ): + def __init__(self, *, partition_id: str=None, **kwargs) -> None: super(WaitForReconfigurationSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) - self.kind = 'WaitForReconfiguration' # type: str + self.kind = 'WaitForReconfiguration' class WaitingChaosEvent(ChaosEvent): - """Describes a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. + """Describes a Chaos event that gets generated when Chaos is waiting for the + cluster to become ready for faulting, for example, Chaos may be waiting for + the on-going upgrade to finish. All required parameters must be populated in order to send to Azure. - :param kind: Required. The kind of Chaos event.Constant filled by server. Possible values - include: "Invalid", "Started", "ExecutingFaults", "Waiting", "ValidationFailed", "TestError", - "Stopped". - :type kind: str or ~azure.servicefabric.models.ChaosEventKind - :param time_stamp_utc: Required. The UTC timestamp when this Chaos event was generated. - :type time_stamp_utc: ~datetime.datetime - :param reason: Describes why the WaitingChaosEvent was generated, for example, due to a cluster - upgrade. + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param reason: Describes why the WaitingChaosEvent was generated, for + example, due to a cluster upgrade. :type reason: str """ _validation = { - 'kind': {'required': True}, 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, } _attribute_map = { - 'kind': {'key': 'Kind', 'type': 'str'}, 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__( - self, - *, - time_stamp_utc: datetime.datetime, - reason: Optional[str] = None, - **kwargs - ): + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: super(WaitingChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) - self.kind = 'Waiting' # type: str self.reason = reason + self.kind = 'Waiting' diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py new file mode 100644 index 000000000000..7072ed2589c2 --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_ap_is_enums.py @@ -0,0 +1,1094 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class HealthState(str, Enum): + + invalid = "Invalid" #: Indicates an invalid health state. All Service Fabric enumerations have the invalid type. The value is zero. + ok = "Ok" #: Indicates the health state is okay. The value is 1. 
+ warning = "Warning" #: Indicates the health state is at a warning level. The value is 2. + error = "Error" #: Indicates the health state is at an error level. Error health state should be investigated, as they can impact the correct functionality of the cluster. The value is 3. + unknown = "Unknown" #: Indicates an unknown health status. The value is 65535. + + +class FabricErrorCodes(str, Enum): + + fabric_e_invalid_partition_key = "FABRIC_E_INVALID_PARTITION_KEY" + fabric_e_imagebuilder_validation_error = "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + fabric_e_invalid_address = "FABRIC_E_INVALID_ADDRESS" + fabric_e_application_not_upgrading = "FABRIC_E_APPLICATION_NOT_UPGRADING" + fabric_e_application_upgrade_validation_error = "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + fabric_e_fabric_not_upgrading = "FABRIC_E_FABRIC_NOT_UPGRADING" + fabric_e_fabric_upgrade_validation_error = "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + fabric_e_invalid_configuration = "FABRIC_E_INVALID_CONFIGURATION" + fabric_e_invalid_name_uri = "FABRIC_E_INVALID_NAME_URI" + fabric_e_path_too_long = "FABRIC_E_PATH_TOO_LONG" + fabric_e_key_too_large = "FABRIC_E_KEY_TOO_LARGE" + fabric_e_service_affinity_chain_not_supported = "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + fabric_e_invalid_atomic_group = "FABRIC_E_INVALID_ATOMIC_GROUP" + fabric_e_value_empty = "FABRIC_E_VALUE_EMPTY" + fabric_e_node_not_found = "FABRIC_E_NODE_NOT_FOUND" + fabric_e_application_type_not_found = "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + fabric_e_application_not_found = "FABRIC_E_APPLICATION_NOT_FOUND" + fabric_e_service_type_not_found = "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + fabric_e_service_does_not_exist = "FABRIC_E_SERVICE_DOES_NOT_EXIST" + fabric_e_service_type_template_not_found = "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + fabric_e_configuration_section_not_found = "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + fabric_e_partition_not_found = "FABRIC_E_PARTITION_NOT_FOUND" + fabric_e_replica_does_not_exist = 
"FABRIC_E_REPLICA_DOES_NOT_EXIST" + fabric_e_service_group_does_not_exist = "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + fabric_e_configuration_parameter_not_found = "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + fabric_e_directory_not_found = "FABRIC_E_DIRECTORY_NOT_FOUND" + fabric_e_fabric_version_not_found = "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + fabric_e_file_not_found = "FABRIC_E_FILE_NOT_FOUND" + fabric_e_name_does_not_exist = "FABRIC_E_NAME_DOES_NOT_EXIST" + fabric_e_property_does_not_exist = "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + fabric_e_enumeration_completed = "FABRIC_E_ENUMERATION_COMPLETED" + fabric_e_service_manifest_not_found = "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + fabric_e_key_not_found = "FABRIC_E_KEY_NOT_FOUND" + fabric_e_health_entity_not_found = "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + fabric_e_application_type_already_exists = "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + fabric_e_application_already_exists = "FABRIC_E_APPLICATION_ALREADY_EXISTS" + fabric_e_application_already_in_target_version = "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + fabric_e_application_type_provision_in_progress = "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + fabric_e_application_upgrade_in_progress = "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + fabric_e_service_already_exists = "FABRIC_E_SERVICE_ALREADY_EXISTS" + fabric_e_service_group_already_exists = "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + fabric_e_application_type_in_use = "FABRIC_E_APPLICATION_TYPE_IN_USE" + fabric_e_fabric_already_in_target_version = "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + fabric_e_fabric_version_already_exists = "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + fabric_e_fabric_version_in_use = "FABRIC_E_FABRIC_VERSION_IN_USE" + fabric_e_fabric_upgrade_in_progress = "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + fabric_e_name_already_exists = "FABRIC_E_NAME_ALREADY_EXISTS" + fabric_e_name_not_empty = "FABRIC_E_NAME_NOT_EMPTY" + fabric_e_property_check_failed = "FABRIC_E_PROPERTY_CHECK_FAILED" + 
fabric_e_service_metadata_mismatch = "FABRIC_E_SERVICE_METADATA_MISMATCH" + fabric_e_service_type_mismatch = "FABRIC_E_SERVICE_TYPE_MISMATCH" + fabric_e_health_stale_report = "FABRIC_E_HEALTH_STALE_REPORT" + fabric_e_sequence_number_check_failed = "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + fabric_e_node_has_not_stopped_yet = "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + fabric_e_instance_id_mismatch = "FABRIC_E_INSTANCE_ID_MISMATCH" + fabric_e_value_too_large = "FABRIC_E_VALUE_TOO_LARGE" + fabric_e_no_write_quorum = "FABRIC_E_NO_WRITE_QUORUM" + fabric_e_not_primary = "FABRIC_E_NOT_PRIMARY" + fabric_e_not_ready = "FABRIC_E_NOT_READY" + fabric_e_reconfiguration_pending = "FABRIC_E_RECONFIGURATION_PENDING" + fabric_e_service_offline = "FABRIC_E_SERVICE_OFFLINE" + e_abort = "E_ABORT" + fabric_e_communication_error = "FABRIC_E_COMMUNICATION_ERROR" + fabric_e_operation_not_complete = "FABRIC_E_OPERATION_NOT_COMPLETE" + fabric_e_timeout = "FABRIC_E_TIMEOUT" + fabric_e_node_is_up = "FABRIC_E_NODE_IS_UP" + e_fail = "E_FAIL" + fabric_e_backup_is_enabled = "FABRIC_E_BACKUP_IS_ENABLED" + fabric_e_restore_source_target_partition_mismatch = "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + fabric_e_invalid_for_stateless_services = "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + fabric_e_backup_not_enabled = "FABRIC_E_BACKUP_NOT_ENABLED" + fabric_e_backup_policy_not_existing = "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + fabric_e_fault_analysis_service_not_existing = "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + fabric_e_backup_in_progress = "FABRIC_E_BACKUP_IN_PROGRESS" + fabric_e_restore_in_progress = "FABRIC_E_RESTORE_IN_PROGRESS" + fabric_e_backup_policy_already_existing = "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + fabric_e_invalid_service_scaling_policy = "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + e_invalidarg = "E_INVALIDARG" + fabric_e_single_instance_application_already_exists = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" + fabric_e_single_instance_application_not_found 
= "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" + fabric_e_volume_already_exists = "FABRIC_E_VOLUME_ALREADY_EXISTS" + fabric_e_volume_not_found = "FABRIC_E_VOLUME_NOT_FOUND" + serialization_error = "SerializationError" + fabric_e_imagebuilder_reserved_directory_error = "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" + + +class ApplicationDefinitionKind(str, Enum): + + invalid = "Invalid" #: Indicates the application definition kind is invalid. All Service Fabric enumerations have the invalid type. The value is 65535. + service_fabric_application_description = "ServiceFabricApplicationDescription" #: Indicates the application is defined by a Service Fabric application description. The value is 0. + compose = "Compose" #: Indicates the application is defined by compose file(s). The value is 1. + + +class ApplicationStatus(str, Enum): + + invalid = "Invalid" #: Indicates the application status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + ready = "Ready" #: Indicates the application status is ready. The value is 1. + upgrading = "Upgrading" #: Indicates the application status is upgrading. The value is 2. + creating = "Creating" #: Indicates the application status is creating. The value is 3. + deleting = "Deleting" #: Indicates the application status is deleting. The value is 4. + failed = "Failed" #: Indicates the creation or deletion of application was terminated due to persistent failures. Another create/delete request can be accepted to resume a failed application. The value is 5. + + +class ApplicationPackageCleanupPolicy(str, Enum): + + invalid = "Invalid" #: Indicates that the application package cleanup policy is invalid. This value is default. The value is zero. + default = "Default" #: Indicates that the cleanup policy of application packages is based on the cluster setting "CleanupApplicationPackageOnProvisionSuccess." The value is 1. 
+ automatic = "Automatic" #: Indicates that the service fabric runtime determines when to do the application package cleanup. By default, cleanup is done on successful provision. The value is 2. + manual = "Manual" #: Indicates that the user has to explicitly clean up the application package. The value is 3. + + +class ApplicationTypeDefinitionKind(str, Enum): + + invalid = "Invalid" #: Indicates the application type definition kind is invalid. All Service Fabric enumerations have the invalid type. The value is 0. + service_fabric_application_package = "ServiceFabricApplicationPackage" #: Indicates the application type is defined and created by a Service Fabric application package provided by the user. The value is 1. + compose = "Compose" #: Indicates the application type is defined and created implicitly as part of a compose deployment. The value is 2. + + +class ApplicationTypeStatus(str, Enum): + + invalid = "Invalid" #: Indicates the application type status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + provisioning = "Provisioning" #: Indicates that the application type is being provisioned in the cluster. The value is 1. + available = "Available" #: Indicates that the application type is fully provisioned and is available for use. An application of this type and version can be created. The value is 2. + unprovisioning = "Unprovisioning" #: Indicates that the application type is in process of being unprovisioned from the cluster. The value is 3. + failed = "Failed" #: Indicates that the application type provisioning failed and it is unavailable for use. The failure details can be obtained from the application type information query. The failed application type information remains in the cluster until it is unprovisioned or reprovisioned successfully. The value is 4. + + +class UpgradeKind(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. 
The value is zero. + rolling = "Rolling" #: The upgrade progresses one upgrade domain at a time. The value is 1 + + +class UpgradeMode(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + unmonitored_auto = "UnmonitoredAuto" #: The upgrade will proceed automatically without performing any health monitoring. The value is 1 + unmonitored_manual = "UnmonitoredManual" #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually monitor health before proceeding. The value is 2 + monitored = "Monitored" #: The upgrade will stop after completing each upgrade domain and automatically monitor health before proceeding. The value is 3 + + +class UpgradeSortOrder(str, Enum): + + invalid = "Invalid" #: Indicates that this sort order is not valid. All Service Fabric enumerations have the invalid type. The value is 0. + default = "Default" #: Indicates that the default sort order (as specified in cluster manifest) will be used. The value is 1. + numeric = "Numeric" #: Indicates that forward numeric sort order (UD names sorted as numbers) will be used. The value is 2. + lexicographical = "Lexicographical" #: Indicates that forward lexicographical sort order (UD names sorted as strings) will be used. The value is 3. + reverse_numeric = "ReverseNumeric" #: Indicates that reverse numeric sort order (UD names sorted as numbers) will be used. The value is 4. + reverse_lexicographical = "ReverseLexicographical" #: Indicates that reverse lexicographical sort order (UD names sorted as strings) will be used. The value is 5. + + +class FailureAction(str, Enum): + + invalid = "Invalid" #: Indicates the failure action is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + rollback = "Rollback" #: The upgrade will start rolling back automatically. 
The value is 1 + manual = "Manual" #: The upgrade will switch to UnmonitoredManual upgrade mode. The value is 2 + + +class UpgradeDomainState(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade domain state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + pending = "Pending" #: The upgrade domain has not started upgrading yet. The value is 1 + in_progress = "InProgress" #: The upgrade domain is being upgraded but not complete yet. The value is 2 + completed = "Completed" #: The upgrade domain has completed upgrade. The value is 3 + + +class UpgradeState(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + rolling_back_in_progress = "RollingBackInProgress" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 1 + rolling_back_completed = "RollingBackCompleted" #: The upgrade has finished rolling back. The value is 2 + rolling_forward_pending = "RollingForwardPending" #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an explicit move next request in UnmonitoredManual mode or performing health checks in Monitored mode. The value is 3 + rolling_forward_in_progress = "RollingForwardInProgress" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 4 + rolling_forward_completed = "RollingForwardCompleted" #: The upgrade has finished rolling forward. The value is 5 + failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 6 + + +class NodeUpgradePhase(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + pre_upgrade_safety_check = "PreUpgradeSafetyCheck" #: The upgrade has not started yet due to pending safety checks. 
The value is 1 + upgrading = "Upgrading" #: The upgrade is in progress. The value is 2 + post_upgrade_safety_check = "PostUpgradeSafetyCheck" #: The upgrade has completed and post upgrade safety checks are being performed. The value is 3 + + +class FailureReason(str, Enum): + + none = "None" #: Indicates the reason is invalid or unknown. All Service Fabric enumerations have the invalid type. The value is zero. + interrupted = "Interrupted" #: There was an external request to roll back the upgrade. The value is 1 + health_check = "HealthCheck" #: The upgrade failed due to health policy violations. The value is 2 + upgrade_domain_timeout = "UpgradeDomainTimeout" #: An upgrade domain took longer than the allowed upgrade domain timeout to process. The value is 3 + overall_upgrade_timeout = "OverallUpgradeTimeout" #: The overall upgrade took longer than the allowed upgrade timeout to process. The value is 4 + + +class DeactivationIntent(str, Enum): + + pause = "Pause" #: Indicates that the node should be paused. The value is 1. + restart = "Restart" #: Indicates that the intent is for the node to be restarted after a short period of time. The value is 2. + remove_data = "RemoveData" #: Indicates the intent is for the node to remove data. The value is 3. + + +class DeployedApplicationStatus(str, Enum): + + invalid = "Invalid" #: Indicates that deployment status is not valid. All Service Fabric enumerations have the invalid type. The value is zero. + downloading = "Downloading" #: Indicates that the package is downloading from the ImageStore. The value is 1. + activating = "Activating" #: Indicates that the package is activating. The value is 2. + active = "Active" #: Indicates that the package is active. The value is 3. + upgrading = "Upgrading" #: Indicates that the package is upgrading. The value is 4. + deactivating = "Deactivating" #: Indicates that the package is deactivating. The value is 5. 
+ + +class ReplicaStatus(str, Enum): + + invalid = "Invalid" #: Indicates the replica status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + in_build = "InBuild" #: The replica is being built. This means that a primary replica is seeding this replica. The value is 1. + standby = "Standby" #: The replica is in standby. The value is 2. + ready = "Ready" #: The replica is ready. The value is 3. + down = "Down" #: The replica is down. The value is 4. + dropped = "Dropped" #: Replica is dropped. This means that the replica has been removed from the replica set. If it is persisted, its state has been deleted. The value is 5. + + +class ReplicaRole(str, Enum): + + unknown = "Unknown" #: Indicates the initial role that a replica is created in. The value is zero. + none = "None" #: Specifies that the replica has no responsibility in regard to the replica set. The value is 1 + primary = "Primary" #: Refers to the replica in the set on which all read and write operations are complete in order to enforce strong consistency semantics. Read operations are handled directly by the Primary replica, while write operations must be acknowledged by a quorum of the replicas in the replica set. There can only be one Primary replica in a replica set at a time. The value is 2. + idle_secondary = "IdleSecondary" #: Refers to a replica in the set that receives a state transfer from the Primary replica to prepare for becoming an active Secondary replica. There can be multiple Idle Secondary replicas in a replica set at a time. Idle Secondary replicas do not count as a part of a write quorum. The value is 3. + active_secondary = "ActiveSecondary" #: Refers to a replica in the set that receives state updates from the Primary replica, applies them, and sends acknowledgements back. Secondary replicas must participate in the write quorum for a replica set. There can be multiple active Secondary replicas in a replica set at a time. 
The number of active Secondary replicas is configurable that the reliability subsystem should maintain. The value is 4. + + +class ReconfigurationPhase(str, Enum): + + unknown = "Unknown" #: Indicates the invalid reconfiguration phase. + none = "None" #: Specifies that there is no reconfiguration in progress. + phase0 = "Phase0" #: Refers to the phase where the reconfiguration is transferring data from the previous primary to the new primary. + phase1 = "Phase1" #: Refers to the phase where the reconfiguration is querying the replica set for the progress. + phase2 = "Phase2" #: Refers to the phase where the reconfiguration is ensuring that data from the current primary is present in a majority of the replica set. + phase3 = "Phase3" #: This phase is for internal use only. + phase4 = "Phase4" #: This phase is for internal use only. + abort_phase_zero = "AbortPhaseZero" #: This phase is for internal use only. + + +class ReconfigurationType(str, Enum): + + unknown = "Unknown" #: Indicates the invalid reconfiguration type. + swap_primary = "SwapPrimary" #: Specifies that the primary replica is being swapped with a different replica. + failover = "Failover" #: Reconfiguration triggered in response to a primary going down. This could be due to many reasons such as primary replica crashing etc. + other = "Other" #: Reconfigurations where the primary replica is not changing. + + +class EntityKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. The value is zero. + node = "Node" #: Indicates the entity is a Service Fabric node. The value is 1. + partition = "Partition" #: Indicates the entity is a Service Fabric partition. The value is 2. + service = "Service" #: Indicates the entity is a Service Fabric service. The value is 3. + application = "Application" #: Indicates the entity is a Service Fabric application. The value is 4. 
+ replica = "Replica" #: Indicates the entity is a Service Fabric replica. The value is 5. + deployed_application = "DeployedApplication" #: Indicates the entity is a Service Fabric deployed application. The value is 6. + deployed_service_package = "DeployedServicePackage" #: Indicates the entity is a Service Fabric deployed service package. The value is 7. + cluster = "Cluster" #: Indicates the entity is a Service Fabric cluster. The value is 8. + + +class FabricEventKind(str, Enum): + + cluster_event = "ClusterEvent" + container_instance_event = "ContainerInstanceEvent" + node_event = "NodeEvent" + application_event = "ApplicationEvent" + service_event = "ServiceEvent" + partition_event = "PartitionEvent" + replica_event = "ReplicaEvent" + partition_analysis_event = "PartitionAnalysisEvent" + application_created = "ApplicationCreated" + application_deleted = "ApplicationDeleted" + application_new_health_report = "ApplicationNewHealthReport" + application_health_report_expired = "ApplicationHealthReportExpired" + application_upgrade_completed = "ApplicationUpgradeCompleted" + application_upgrade_domain_completed = "ApplicationUpgradeDomainCompleted" + application_upgrade_rollback_completed = "ApplicationUpgradeRollbackCompleted" + application_upgrade_rollback_started = "ApplicationUpgradeRollbackStarted" + application_upgrade_started = "ApplicationUpgradeStarted" + deployed_application_new_health_report = "DeployedApplicationNewHealthReport" + deployed_application_health_report_expired = "DeployedApplicationHealthReportExpired" + application_process_exited = "ApplicationProcessExited" + application_container_instance_exited = "ApplicationContainerInstanceExited" + node_aborted = "NodeAborted" + node_added_to_cluster = "NodeAddedToCluster" + node_closed = "NodeClosed" + node_deactivate_completed = "NodeDeactivateCompleted" + node_deactivate_started = "NodeDeactivateStarted" + node_down = "NodeDown" + node_new_health_report = "NodeNewHealthReport" + 
node_health_report_expired = "NodeHealthReportExpired" + node_open_succeeded = "NodeOpenSucceeded" + node_open_failed = "NodeOpenFailed" + node_removed_from_cluster = "NodeRemovedFromCluster" + node_up = "NodeUp" + partition_new_health_report = "PartitionNewHealthReport" + partition_health_report_expired = "PartitionHealthReportExpired" + partition_reconfigured = "PartitionReconfigured" + partition_primary_move_analysis = "PartitionPrimaryMoveAnalysis" + service_created = "ServiceCreated" + service_deleted = "ServiceDeleted" + service_new_health_report = "ServiceNewHealthReport" + service_health_report_expired = "ServiceHealthReportExpired" + deployed_service_package_new_health_report = "DeployedServicePackageNewHealthReport" + deployed_service_package_health_report_expired = "DeployedServicePackageHealthReportExpired" + stateful_replica_new_health_report = "StatefulReplicaNewHealthReport" + stateful_replica_health_report_expired = "StatefulReplicaHealthReportExpired" + stateless_replica_new_health_report = "StatelessReplicaNewHealthReport" + stateless_replica_health_report_expired = "StatelessReplicaHealthReportExpired" + cluster_new_health_report = "ClusterNewHealthReport" + cluster_health_report_expired = "ClusterHealthReportExpired" + cluster_upgrade_completed = "ClusterUpgradeCompleted" + cluster_upgrade_domain_completed = "ClusterUpgradeDomainCompleted" + cluster_upgrade_rollback_completed = "ClusterUpgradeRollbackCompleted" + cluster_upgrade_rollback_started = "ClusterUpgradeRollbackStarted" + cluster_upgrade_started = "ClusterUpgradeStarted" + chaos_stopped = "ChaosStopped" + chaos_started = "ChaosStarted" + chaos_code_package_restart_scheduled = "ChaosCodePackageRestartScheduled" + chaos_replica_removal_scheduled = "ChaosReplicaRemovalScheduled" + chaos_partition_secondary_move_scheduled = "ChaosPartitionSecondaryMoveScheduled" + chaos_partition_primary_move_scheduled = "ChaosPartitionPrimaryMoveScheduled" + chaos_replica_restart_scheduled = 
"ChaosReplicaRestartScheduled" + chaos_node_restart_scheduled = "ChaosNodeRestartScheduled" + + +class HealthEvaluationKind(str, Enum): + + invalid = "Invalid" #: Indicates that the health evaluation is invalid. The value is zero. + event = "Event" #: Indicates that the health evaluation is for a health event. The value is 1. + replicas = "Replicas" #: Indicates that the health evaluation is for the replicas of a partition. The value is 2. + partitions = "Partitions" #: Indicates that the health evaluation is for the partitions of a service. The value is 3. + deployed_service_packages = "DeployedServicePackages" #: Indicates that the health evaluation is for the deployed service packages of a deployed application. The value is 4. + deployed_applications = "DeployedApplications" #: Indicates that the health evaluation is for the deployed applications of an application. The value is 5. + services = "Services" #: Indicates that the health evaluation is for services of an application. The value is 6. + nodes = "Nodes" #: Indicates that the health evaluation is for the cluster nodes. The value is 7. + applications = "Applications" #: Indicates that the health evaluation is for the cluster applications. The value is 8. + system_application = "SystemApplication" #: Indicates that the health evaluation is for the system application. The value is 9. + upgrade_domain_deployed_applications = "UpgradeDomainDeployedApplications" #: Indicates that the health evaluation is for the deployed applications of an application in an upgrade domain. The value is 10. + upgrade_domain_nodes = "UpgradeDomainNodes" #: Indicates that the health evaluation is for the cluster nodes in an upgrade domain. The value is 11. + replica = "Replica" #: Indicates that the health evaluation is for a replica. The value is 13. + partition = "Partition" #: Indicates that the health evaluation is for a partition. The value is 14. 
+ deployed_service_package = "DeployedServicePackage" #: Indicates that the health evaluation is for a deployed service package. The value is 16. + deployed_application = "DeployedApplication" #: Indicates that the health evaluation is for a deployed application. The value is 17. + service = "Service" #: Indicates that the health evaluation is for a service. The value is 15. + node = "Node" #: Indicates that the health evaluation is for a node. The value is 12. + application = "Application" #: Indicates that the health evaluation is for an application. The value is 18. + delta_nodes_check = "DeltaNodesCheck" #: Indicates that the health evaluation is for the delta of unhealthy cluster nodes. The value is 19. + upgrade_domain_delta_nodes_check = "UpgradeDomainDeltaNodesCheck" #: Indicates that the health evaluation is for the delta of unhealthy upgrade domain cluster nodes. The value is 20. + application_type_applications = "ApplicationTypeApplications" #: – Indicates that the health evaluation is for applications of an application type. The value is 21. + node_type_nodes = "NodeTypeNodes" #: – Indicates that the health evaluation is for nodes of a node type. The value is 22. + + +class Ordering(str, Enum): + + desc = "Desc" #: Descending sort order. + asc = "Asc" #: Ascending sort order. + + +class NodeDeactivationIntent(str, Enum): + + invalid = "Invalid" #: Indicates the node deactivation intent is invalid. All Service Fabric enumerations have the invalid type. The value is zero. This value is not used. + pause = "Pause" #: Indicates that the node should be paused. The value is 1. + restart = "Restart" #: Indicates that the intent is for the node to be restarted after a short period of time. Service Fabric does not restart the node, this action is done outside of Service Fabric. The value is 2. + remove_data = "RemoveData" #: Indicates that the intent is to reimage the node. Service Fabric does not reimage the node, this action is done outside of Service Fabric. 
The value is 3. + remove_node = "RemoveNode" #: Indicates that the node is being decommissioned and is not expected to return. Service Fabric does not decommission the node, this action is done outside of Service Fabric. The value is 4. + + +class NodeDeactivationStatus(str, Enum): + + none = "None" #: No status is associated with the task. The value is zero. + safety_check_in_progress = "SafetyCheckInProgress" #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe to proceed to ensure availability of the service and reliability of the state. This value indicates that one or more safety checks are in progress. The value is 1. + safety_check_complete = "SafetyCheckComplete" #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe to proceed to ensure availability of the service and reliability of the state. This value indicates that all safety checks have been completed. The value is 2. + completed = "Completed" #: The task is completed. The value is 3. + + +class NodeDeactivationTaskType(str, Enum): + + invalid = "Invalid" #: Indicates the node deactivation task type is invalid. All Service Fabric enumerations have the invalid type. The value is zero. This value is not used. + infrastructure = "Infrastructure" #: Specifies the task created by Infrastructure hosting the nodes. The value is 1. + repair = "Repair" #: Specifies the task that was created by the Repair Manager service. The value is 2. + client = "Client" #: Specifies that the task was created by using the public API. The value is 3. + + +class NodeStatus(str, Enum): + + invalid = "Invalid" #: Indicates the node status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + up = "Up" #: Indicates the node is up. The value is 1. + down = "Down" #: Indicates the node is down. The value is 2. + enabling = "Enabling" #: Indicates the node is in process of being enabled. The value is 3. 
+ disabling = "Disabling" #: Indicates the node is in the process of being disabled. The value is 4. + disabled = "Disabled" #: Indicates the node is disabled. The value is 5. + unknown = "Unknown" #: Indicates the node is unknown. A node would be in Unknown state if Service Fabric does not have authoritative information about that node. This can happen if the system learns about a node at runtime.The value is 6. + removed = "Removed" #: Indicates the node is removed. A node would be in Removed state if NodeStateRemoved API has been called for this node. In other words, Service Fabric has been informed that the persisted state on the node has been permanently lost. The value is 7. + + +class ServicePartitionStatus(str, Enum): + + invalid = "Invalid" #: Indicates the partition status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + ready = "Ready" #: Indicates that the partition is ready. This means that for a stateless service partition there is at least one instance that is up and for a stateful service partition the number of ready replicas is greater than or equal to the MinReplicaSetSize. The value is 1. + not_ready = "NotReady" #: Indicates that the partition is not ready. This status is returned when none of the other states apply. The value is 2. + in_quorum_loss = "InQuorumLoss" #: Indicates that the partition is in quorum loss. This means that number of replicas that are up and participating in a replica set is less than MinReplicaSetSize for this partition. The value is 3. + reconfiguring = "Reconfiguring" #: Indicates that the partition is undergoing reconfiguration of its replica sets. This can happen due to failover, upgrade, load balancing or addition or removal of replicas from the replica set. The value is 4. + deleting = "Deleting" #: Indicates that the partition is being deleted. The value is 5. + + +class ServiceStatus(str, Enum): + + unknown = "Unknown" #: Indicates the service status is unknown. 
The value is zero. + active = "Active" #: Indicates the service status is active. The value is 1. + upgrading = "Upgrading" #: Indicates the service is upgrading. The value is 2. + deleting = "Deleting" #: Indicates the service is being deleted. The value is 3. + creating = "Creating" #: Indicates the service is being created. The value is 4. + failed = "Failed" #: Indicates creation or deletion was terminated due to persistent failures. Another create/delete request can be accepted. The value is 5. + + +class ProvisionApplicationTypeKind(str, Enum): + + invalid = "Invalid" #: Indicates that the provision kind is invalid. This value is default and should not be used. The value is zero. + image_store_path = "ImageStorePath" #: Indicates that the provision is for a package that was previously uploaded to the image store. The value is 1. + external_store = "ExternalStore" #: Indicates that the provision is for an application package that was previously uploaded to an external store. The application package ends with the extension *.sfpkg. The value is 2. + + +class UpgradeType(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + rolling = "Rolling" #: The upgrade progresses one upgrade domain at a time. The value is 1. + rolling_force_restart = "Rolling_ForceRestart" #: The upgrade gets restarted by force. The value is 2. + + +class SafetyCheckKind(str, Enum): + + invalid = "Invalid" #: Indicates that the upgrade safety check kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + ensure_seed_node_quorum = "EnsureSeedNodeQuorum" #: Indicates that if we bring down the node then this will result in global seed node quorum loss. The value is 1. 
+ ensure_partition_quorum = "EnsurePartitionQuorum" #: Indicates that there is some partition for which if we bring down the replica on the node, it will result in quorum loss for that partition. The value is 2. + wait_for_primary_placement = "WaitForPrimaryPlacement" #: Indicates that there is some replica on the node that was moved out of this node due to upgrade. Service Fabric is now waiting for the primary to be moved back to this node. The value is 3. + wait_for_primary_swap = "WaitForPrimarySwap" #: Indicates that Service Fabric is waiting for a primary replica to be moved out of the node before starting upgrade on that node. The value is 4. + wait_for_reconfiguration = "WaitForReconfiguration" #: Indicates that there is some replica on the node that is involved in a reconfiguration. Service Fabric is waiting for the reconfiguration to be complete before starting upgrade on that node. The value is 5. + wait_for_inbuild_replica = "WaitForInbuildReplica" #: Indicates that there is either a replica on the node that is going through copy, or there is a primary replica on the node that is copying data to some other replica. In both cases, bringing down the replica on the node due to upgrade will abort the copy. The value is 6. + ensure_availability = "EnsureAvailability" #: Indicates that there is either a stateless service partition on the node having exactly one instance, or there is a primary replica on the node for which the partition is quorum loss. In both cases, bringing down the replicas due to upgrade will result in loss of availability. The value is 7. + + +class CreateFabricDump(str, Enum): + + false = "False" + true = "True" + + +class ServicePackageActivationMode(str, Enum): + + shared_process = "SharedProcess" #: This is the default activation mode. With this activation mode, replicas or instances from different partition(s) of service, on a given node, will share same activation of service package on a node. The value is zero.
+ exclusive_process = "ExclusiveProcess" #: With this activation mode, each replica or instance of service, on a given node, will have its own dedicated activation of service package on a node. The value is 1. + + +class ServiceKind(str, Enum): + + invalid = "Invalid" #: Indicates the service kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + stateless = "Stateless" #: Does not use Service Fabric to make its state highly available or reliable. The value is 1. + stateful = "Stateful" #: Uses Service Fabric to make its state or part of its state highly available and reliable. The value is 2. + + +class ServicePartitionKind(str, Enum): + + invalid = "Invalid" #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + singleton = "Singleton" #: Indicates that there is only one partition, and SingletonPartitionSchemeDescription was specified while creating the service. The value is 1. + int64_range = "Int64Range" #: Indicates that the partition is based on Int64 key ranges, and UniformInt64RangePartitionSchemeDescription was specified while creating the service. The value is 2. + named = "Named" #: Indicates that the partition is based on string names, and NamedPartitionInformation was specified while creating the service. The value is 3. + + +class ServicePlacementPolicyType(str, Enum): + + invalid = "Invalid" #: Indicates the type of the placement policy is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + invalid_domain = "InvalidDomain" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or upgrade domain cannot be used for placement of this service. The value is 1. 
+ require_domain = "RequireDomain" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the service must be placed in a specific domain. The value is 2. + prefer_primary_domain = "PreferPrimaryDomain" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the Primary replica for the partitions of the service should be located in a particular domain as an optimization. The value is 3. + require_domain_distribution = "RequireDomainDistribution" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will disallow placement of any two replicas from the same partition in the same domain at any time. The value is 4. + non_partially_place_service = "NonPartiallyPlaceService" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all replicas of a particular partition of the service should be placed atomically. The value is 5. + allow_multiple_stateless_instances_on_node = "AllowMultipleStatelessInstancesOnNode" #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, which indicates that multiple stateless instances of a particular partition of the service can be placed on a node. The value is 6. + + +class ServiceLoadMetricWeight(str, Enum): + + zero = "Zero" #: Disables resource balancing for this metric. This value is zero. + low = "Low" #: Specifies the metric weight of the service load as Low. The value is 1. + medium = "Medium" #: Specifies the metric weight of the service load as Medium. The value is 2. + high = "High" #: Specifies the metric weight of the service load as High. 
The value is 3. + + +class HostType(str, Enum): + + invalid = "Invalid" #: Indicates the type of host is not known or invalid. The value is 0. + exe_host = "ExeHost" #: Indicates the host is an executable. The value is 1. + container_host = "ContainerHost" #: Indicates the host is a container. The value is 2. + + +class HostIsolationMode(str, Enum): + + none = "None" #: Indicates the isolation mode is not applicable for given HostType. The value is 0. + process = "Process" #: This is the default isolation mode for a ContainerHost. The value is 1. + hyper_v = "HyperV" #: Indicates the ContainerHost is a Hyper-V container. This applies to only Windows containers. The value is 2. + + +class DeploymentStatus(str, Enum): + + invalid = "Invalid" #: Indicates status of the application or service package is not known or invalid. The value is 0. + downloading = "Downloading" #: Indicates the application or service package is being downloaded to the node from the ImageStore. The value is 1. + activating = "Activating" #: Indicates the application or service package is being activated. The value is 2. + active = "Active" #: Indicates the application or service package is active on the node. The value is 3. + upgrading = "Upgrading" #: Indicates the application or service package is being upgraded. The value is 4. + deactivating = "Deactivating" #: Indicates the application or service package is being deactivated. The value is 5. + ran_to_completion = "RanToCompletion" #: Indicates the application or service package has run to completion successfully. The value is 6. + failed = "Failed" #: Indicates the application or service package has failed to run to completion. The value is 7. + + +class EntryPointStatus(str, Enum): + + invalid = "Invalid" #: Indicates status of entry point is not known or invalid. The value is 0. + pending = "Pending" #: Indicates the entry point is scheduled to be started. The value is 1.
+ starting = "Starting" #: Indicates the entry point is being started. The value is 2. + started = "Started" #: Indicates the entry point was started successfully and is running. The value is 3. + stopping = "Stopping" #: Indicates the entry point is being stopped. The value is 4. + stopped = "Stopped" #: Indicates the entry point is not running. The value is 5. + + +class ChaosStatus(str, Enum): + + invalid = "Invalid" #: Indicates an invalid Chaos status. All Service Fabric enumerations have the invalid type. The value is zero. + running = "Running" #: Indicates that Chaos is not stopped. The value is one. + stopped = "Stopped" #: Indicates that Chaos is not scheduling further faults. The value is two. + + +class ChaosScheduleStatus(str, Enum): + + invalid = "Invalid" #: Indicates an invalid Chaos Schedule status. All Service Fabric enumerations have the invalid type. The value is zero. + stopped = "Stopped" #: Indicates that the schedule is stopped and not being used to schedule runs of chaos. The value is one. + active = "Active" #: Indicates that the schedule is active and is being used to schedule runs of Chaos. The value is two. + expired = "Expired" #: Indicates that the schedule is expired and will no longer be used to schedule runs of Chaos. The value is three. + pending = "Pending" #: Indicates that the schedule is pending and is not yet being used to schedule runs of Chaos but will be used when the start time is passed. The value is four. + + +class ChaosEventKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid Chaos event kind. All Service Fabric enumerations have the invalid type. + started = "Started" #: Indicates a Chaos event that gets generated when Chaos is started. + executing_faults = "ExecutingFaults" #: Indicates a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings. 
+ waiting = "Waiting" #: Indicates a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. + validation_failed = "ValidationFailed" #: Indicates a Chaos event that gets generated when the cluster entities do not become stable and healthy within ChaosParameters.MaxClusterStabilizationTimeoutInSeconds. + test_error = "TestError" #: Indicates a Chaos event that gets generated when an unexpected event has occurred in the Chaos engine, for example, due to the cluster snapshot being inconsistent, while faulting a faultable entity Chaos found that the entity was already faulted. + stopped = "Stopped" #: Indicates a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up. + + +class ComposeDeploymentStatus(str, Enum): + + invalid = "Invalid" #: Indicates that the compose deployment status is invalid. The value is zero. + provisioning = "Provisioning" #: Indicates that the compose deployment is being provisioned in background. The value is 1. + creating = "Creating" #: Indicates that the compose deployment is being created in background. The value is 2. + ready = "Ready" #: Indicates that the compose deployment has been successfully created or upgraded. The value is 3. + unprovisioning = "Unprovisioning" #: Indicates that the compose deployment is being unprovisioned in background. The value is 4. + deleting = "Deleting" #: Indicates that the compose deployment is being deleted in background. The value is 5. + failed = "Failed" #: Indicates that the compose deployment was terminated due to persistent failures. The value is 6. + upgrading = "Upgrading" #: Indicates that the compose deployment is being upgraded in the background. The value is 7. + + +class ComposeDeploymentUpgradeState(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade state is invalid. 
All Service Fabric enumerations have the invalid type. The value is zero. + provisioning_target = "ProvisioningTarget" #: The upgrade is in the progress of provisioning target application type version. The value is 1. + rolling_forward_in_progress = "RollingForwardInProgress" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. + rolling_forward_pending = "RollingForwardPending" #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an explicit move next request in UnmonitoredManual mode or performing health checks in Monitored mode. The value is 3 + unprovisioning_current = "UnprovisioningCurrent" #: The upgrade is in the progress of unprovisioning current application type version and rolling forward to the target version is completed. The value is 4. + rolling_forward_completed = "RollingForwardCompleted" #: The upgrade has finished rolling forward. The value is 5. + rolling_back_in_progress = "RollingBackInProgress" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 6. + unprovisioning_target = "UnprovisioningTarget" #: The upgrade is in the progress of unprovisioning target application type version and rolling back to the current version is completed. The value is 7. + rolling_back_completed = "RollingBackCompleted" #: The upgrade has finished rolling back. The value is 8. + failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 9. + + +class ServiceCorrelationScheme(str, Enum): + + invalid = "Invalid" #: An invalid correlation scheme. Cannot be used. The value is zero. + affinity = "Affinity" #: Indicates that this service has an affinity relationship with another service. Provided for backwards compatibility, consider preferring the Aligned or NonAlignedAffinity options. The value is 1. 
+ aligned_affinity = "AlignedAffinity" #: Aligned affinity ensures that the primaries of the partitions of the affinitized services are collocated on the same nodes. This is the default and is the same as selecting the Affinity scheme. The value is 2. + non_aligned_affinity = "NonAlignedAffinity" #: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will be collocated. The value is 3. + + +class MoveCost(str, Enum): + + zero = "Zero" #: Zero move cost. This value is zero. + low = "Low" #: Specifies the move cost of the service as Low. The value is 1. + medium = "Medium" #: Specifies the move cost of the service as Medium. The value is 2. + high = "High" #: Specifies the move cost of the service as High. The value is 3. + very_high = "VeryHigh" #: Specifies the move cost of the service as VeryHigh. The value is 4. + + +class PartitionScheme(str, Enum): + + invalid = "Invalid" #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + singleton = "Singleton" #: Indicates that there is only one partition, and is a SingletonPartitionSchemeDescription object. The value is 1. + uniform_int64_range = "UniformInt64Range" #: Indicates that the partition is based on Int64 key ranges, and is a UniformInt64RangePartitionSchemeDescription object. The value is 2. + named = "Named" #: Indicates that the partition is based on string names, and is a NamedPartitionSchemeDescription object. The value is 3. + + +class ServiceOperationName(str, Enum): + + unknown = "Unknown" #: Reserved for future use. + none = "None" #: The service replica or instance is not going through any life-cycle changes. + open = "Open" #: The service replica or instance is being opened. + change_role = "ChangeRole" #: The service replica is changing roles.
+ close = "Close" #: The service replica or instance is being closed. + abort = "Abort" #: The service replica or instance is being aborted. + + +class ReplicatorOperationName(str, Enum): + + invalid = "Invalid" #: Default value if the replicator is not yet ready. + none = "None" #: Replicator is not running any operation from Service Fabric perspective. + open = "Open" #: Replicator is opening. + change_role = "ChangeRole" #: Replicator is in the process of changing its role. + update_epoch = "UpdateEpoch" #: Due to a change in the replica set, replicator is being updated with its Epoch. + close = "Close" #: Replicator is closing. + abort = "Abort" #: Replicator is being aborted. + on_data_loss = "OnDataLoss" #: Replicator is handling the data loss condition, where the user service may potentially be recovering state from an external source. + wait_for_catchup = "WaitForCatchup" #: Replicator is waiting for a quorum of replicas to be caught up to the latest state. + build = "Build" #: Replicator is in the process of building one or more replicas. + + +class PartitionAccessStatus(str, Enum): + + invalid = "Invalid" #: Indicates that the read or write operation access status is not valid. This value is not returned to the caller. + granted = "Granted" #: Indicates that the read or write operation access is granted and the operation is allowed. + reconfiguration_pending = "ReconfigurationPending" #: Indicates that the client should try again later, because a reconfiguration is in progress. + not_primary = "NotPrimary" #: Indicates that this client request was received by a replica that is not a Primary replica. + no_write_quorum = "NoWriteQuorum" #: Indicates that no write quorum is available and, therefore, no write operation can be accepted. + + +class FabricReplicaStatus(str, Enum): + + invalid = "Invalid" #: Indicates that the read or write operation access status is not valid. This value is not returned to the caller. 
+ down = "Down" #: Indicates that the replica is down. + up = "Up" #: Indicates that the replica is up. + + +class ReplicaKind(str, Enum): + + invalid = "Invalid" #: Represents an invalid replica kind. The value is zero. + key_value_store = "KeyValueStore" #: Represents a key value store replica. The value is 1 + + +class ServiceTypeRegistrationStatus(str, Enum): + + invalid = "Invalid" #: Indicates the registration status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + disabled = "Disabled" #: Indicates that the service type is disabled on this node. A type gets disabled when there are too many failures of the code package hosting the service type. If the service type is disabled, new replicas of that service type will not be placed on the node until it is enabled again. The service type is enabled again after the process hosting it comes up and re-registers the type or a preconfigured time interval has passed. The value is 1. + enabled = "Enabled" #: Indicates that the service type is enabled on this node. Replicas of this service type can be placed on this node when the code package registers the service type. The value is 2. + registered = "Registered" #: Indicates that the service type is enabled and registered on the node by a code package. Replicas of this service type can now be placed on this node. The value is 3. + + +class ServiceEndpointRole(str, Enum): + + invalid = "Invalid" #: Indicates the service endpoint role is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + stateless = "Stateless" #: Indicates that the service endpoint is of a stateless service. The value is 1. + stateful_primary = "StatefulPrimary" #: Indicates that the service endpoint is of a primary replica of a stateful service. The value is 2. + stateful_secondary = "StatefulSecondary" #: Indicates that the service endpoint is of a secondary replica of a stateful service. The value is 3. 
+ + +class OperationState(str, Enum): + + invalid = "Invalid" #: The operation state is invalid. + running = "Running" #: The operation is in progress. + rolling_back = "RollingBack" #: The operation is rolling back internal system state because it encountered a fatal error or was cancelled by the user. "RollingBack" does not refer to user state. For example, if CancelOperation is called on a command of type PartitionDataLoss, state of "RollingBack" does not mean service data is being restored (assuming the command has progressed far enough to cause data loss). It means the system is rolling back/cleaning up internal system state associated with the command. + completed = "Completed" #: The operation has completed successfully and is no longer running. + faulted = "Faulted" #: The operation has failed and is no longer running. + cancelled = "Cancelled" #: The operation was cancelled by the user using CancelOperation, and is no longer running. + force_cancelled = "ForceCancelled" #: The operation was cancelled by the user using CancelOperation, with the force parameter set to true. It is no longer running. Refer to CancelOperation for more details. + + +class OperationType(str, Enum): + + invalid = "Invalid" #: The operation state is invalid. + partition_data_loss = "PartitionDataLoss" #: An operation started using the StartDataLoss API. + partition_quorum_loss = "PartitionQuorumLoss" #: An operation started using the StartQuorumLoss API. + partition_restart = "PartitionRestart" #: An operation started using the StartPartitionRestart API. + node_transition = "NodeTransition" #: An operation started using the StartNodeTransition API. + + +class PackageSharingPolicyScope(str, Enum): + + none = "None" #: No package sharing policy scope. The value is 0. + all = "All" #: Share all code, config and data packages from corresponding service manifest. The value is 1. + code = "Code" #: Share all code packages from corresponding service manifest. The value is 2. 
+ config = "Config" #: Share all config packages from corresponding service manifest. The value is 3. + data = "Data" #: Share all data packages from corresponding service manifest. The value is 4. + + +class PropertyValueKind(str, Enum): + + invalid = "Invalid" #: Indicates the property is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + binary = "Binary" #: The data inside the property is a binary blob. The value is 1. + int64 = "Int64" #: The data inside the property is an int64. The value is 2. + double = "Double" #: The data inside the property is a double. The value is 3. + string = "String" #: The data inside the property is a string. The value is 4. + guid = "Guid" #: The data inside the property is a guid. The value is 5. + + +class PropertyBatchOperationKind(str, Enum): + + invalid = "Invalid" #: Indicates the property operation is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + put = "Put" #: The operation will create or edit a property. The value is 1. + get = "Get" #: The operation will get a property. The value is 2. + check_exists = "CheckExists" #: The operation will check that a property exists or doesn't exists, depending on the provided value. The value is 3. + check_sequence = "CheckSequence" #: The operation will ensure that the sequence number is equal to the provided value. The value is 4. + delete = "Delete" #: The operation will delete a property. The value is 5. + check_value = "CheckValue" #: The operation will ensure that the value of a property is equal to the provided value. The value is 7. + + +class PropertyBatchInfoKind(str, Enum): + + invalid = "Invalid" #: Indicates the property batch info is invalid. All Service Fabric enumerations have the invalid type. + successful = "Successful" #: The property batch succeeded. + failed = "Failed" #: The property batch failed. 
+ + +class RetentionPolicyType(str, Enum): + + basic = "Basic" #: Indicates a basic retention policy type. + invalid = "Invalid" #: Indicates an invalid retention policy type. + + +class BackupStorageKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup storage kind. All Service Fabric enumerations have the invalid type. + file_share = "FileShare" #: Indicates file/ SMB share to be used as backup storage. + azure_blob_store = "AzureBlobStore" #: Indicates Azure blob store to be used as backup storage. + dsms_azure_blob_store = "DsmsAzureBlobStore" #: Indicates Dsms Azure blob store to be used as backup storage. + managed_identity_azure_blob_store = "ManagedIdentityAzureBlobStore" #: Indicates Azure blob store to be used as backup storage using managed identity. + + +class BackupScheduleKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup schedule kind. All Service Fabric enumerations have the invalid type. + time_based = "TimeBased" #: Indicates a time-based backup schedule. + frequency_based = "FrequencyBased" #: Indicates a frequency-based backup schedule. + + +class BackupPolicyScope(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup policy scope type. All Service Fabric enumerations have the invalid type. + partition = "Partition" #: Indicates the backup policy is applied at partition level. Hence overriding any policy which may have applied at partition's service or application level. + service = "Service" #: Indicates the backup policy is applied at service level. All partitions of the service inherit this policy unless explicitly overridden at partition level. + application = "Application" #: Indicates the backup policy is applied at application level. All services and partitions of the application inherit this policy unless explicitly overridden at service or partition level. 
+ + +class BackupSuspensionScope(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup suspension scope type also indicating entity is not suspended. All Service Fabric enumerations have the invalid type. + partition = "Partition" #: Indicates the backup suspension is applied at partition level. + service = "Service" #: Indicates the backup suspension is applied at service level. All partitions of the service are hence suspended for backup. + application = "Application" #: Indicates the backup suspension is applied at application level. All services and partitions of the application are hence suspended for backup. + + +class RestoreState(str, Enum): + + invalid = "Invalid" #: Indicates an invalid restore state. All Service Fabric enumerations have the invalid type. + accepted = "Accepted" #: Operation has been validated and accepted. Restore is yet to be triggered. + restore_in_progress = "RestoreInProgress" #: Restore operation has been triggered and is under process. + success = "Success" #: Operation completed with success. + failure = "Failure" #: Operation completed with failure. + timeout = "Timeout" #: Operation timed out. + + +class BackupType(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup type. All Service Fabric enumerations have the invalid type. + full = "Full" #: Indicates a full backup. + incremental = "Incremental" #: Indicates an incremental backup. A backup chain is comprised of a full backup followed by 0 or more incremental backups. + + +class ManagedIdentityType(str, Enum): + + invalid = "Invalid" #: Indicates an invalid managed identity type. All Service Fabric enumerations have the invalid type. + vmss = "VMSS" #: Indicates VMSS managed identity should be used to connect to Azure blob store. + cluster = "Cluster" #: Indicates cluster managed identity should be used to connect to Azure blob store. 
+ + +class BackupScheduleFrequencyType(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup schedule frequency type. All Service Fabric enumerations have the invalid type. + daily = "Daily" #: Indicates that the time based backup schedule is repeated at a daily frequency. + weekly = "Weekly" #: Indicates that the time based backup schedule is repeated at a weekly frequency. + + +class DayOfWeek(str, Enum): + + sunday = "Sunday" #: Indicates the Day referred is Sunday. + monday = "Monday" #: Indicates the Day referred is Monday. + tuesday = "Tuesday" #: Indicates the Day referred is Tuesday. + wednesday = "Wednesday" #: Indicates the Day referred is Wednesday. + thursday = "Thursday" #: Indicates the Day referred is Thursday. + friday = "Friday" #: Indicates the Day referred is Friday. + saturday = "Saturday" #: Indicates the Day referred is Saturday. + + +class BackupState(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup state. All Service Fabric enumerations have the invalid type. + accepted = "Accepted" #: Operation has been validated and accepted. Backup is yet to be triggered. + backup_in_progress = "BackupInProgress" #: Backup operation has been triggered and is under process. + success = "Success" #: Operation completed with success. + failure = "Failure" #: Operation completed with failure. + timeout = "Timeout" #: Operation timed out. + + +class BackupEntityKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. + partition = "Partition" #: Indicates the entity is a Service Fabric partition. + service = "Service" #: Indicates the entity is a Service Fabric service. + application = "Application" #: Indicates the entity is a Service Fabric application. 
+ + +class ImpactLevel(str, Enum): + + invalid = "Invalid" + none = "None" + restart = "Restart" + remove_data = "RemoveData" + remove_node = "RemoveNode" + + +class RepairImpactKind(str, Enum): + + invalid = "Invalid" #: The repair impact is not valid or is of an unknown type. + node = "Node" #: The repair impact affects a set of Service Fabric nodes. + + +class RepairTargetKind(str, Enum): + + invalid = "Invalid" #: The repair target is not valid or is of an unknown type. + node = "Node" #: The repair target is a set of Service Fabric nodes. + + +class State(str, Enum): + + invalid = "Invalid" #: Indicates that the repair task state is invalid. All Service Fabric enumerations have the invalid value. + created = "Created" #: Indicates that the repair task has been created. + claimed = "Claimed" #: Indicates that the repair task has been claimed by a repair executor. + preparing = "Preparing" #: Indicates that the Repair Manager is preparing the system to handle the impact of the repair task, usually by taking resources offline gracefully. + approved = "Approved" #: Indicates that the repair task has been approved by the Repair Manager and is safe to execute. + executing = "Executing" #: Indicates that execution of the repair task is in progress. + restoring = "Restoring" #: Indicates that the Repair Manager is restoring the system to its pre-repair state, usually by bringing resources back online. + completed = "Completed" #: Indicates that the repair task has completed, and no further state changes will occur. + + +class ResultStatus(str, Enum): + + invalid = "Invalid" #: Indicates that the repair task result is invalid. All Service Fabric enumerations have the invalid value. + succeeded = "Succeeded" #: Indicates that the repair task completed execution successfully. + cancelled = "Cancelled" #: Indicates that the repair task was cancelled prior to execution. 
+ interrupted = "Interrupted" #: Indicates that execution of the repair task was interrupted by a cancellation request after some work had already been performed. + failed = "Failed" #: Indicates that there was a failure during execution of the repair task. Some work may have been performed. + pending = "Pending" #: Indicates that the repair task result is not yet available, because the repair task has not finished executing. + + +class RepairTaskHealthCheckState(str, Enum): + + not_started = "NotStarted" #: Indicates that the health check has not started. + in_progress = "InProgress" #: Indicates that the health check is in progress. + succeeded = "Succeeded" #: Indicates that the health check succeeded. + skipped = "Skipped" #: Indicates that the health check was skipped. + timed_out = "TimedOut" #: Indicates that the health check timed out. + + +class ScalingTriggerKind(str, Enum): + + invalid = "Invalid" #: Indicates the scaling trigger is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + average_partition_load = "AveragePartitionLoad" #: Indicates a trigger where scaling decisions are made based on average load of a partition. The value is 1. + average_service_load = "AverageServiceLoad" #: Indicates a trigger where scaling decisions are made based on average load of a service. The value is 2. + + +class ScalingMechanismKind(str, Enum): + + invalid = "Invalid" #: Indicates the scaling mechanism is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + partition_instance_count = "PartitionInstanceCount" #: Indicates a mechanism for scaling where new instances are added or removed from a partition. The value is 1. + add_remove_incremental_named_partition = "AddRemoveIncrementalNamedPartition" #: Indicates a mechanism for scaling where new named partitions are added or removed from a service. The value is 2. 
+ + +class ResourceStatus(str, Enum): + + unknown = "Unknown" #: Indicates the resource status is unknown. The value is zero. + ready = "Ready" #: Indicates the resource is ready. The value is 1. + upgrading = "Upgrading" #: Indicates the resource is upgrading. The value is 2. + creating = "Creating" #: Indicates the resource is being created. The value is 3. + deleting = "Deleting" #: Indicates the resource is being deleted. The value is 4. + failed = "Failed" #: Indicates the resource is not functional due to persistent failures. See statusDetails property for more details. The value is 5. + + +class SecretKind(str, Enum): + + inlined_value = "inlinedValue" #: A simple secret resource whose plaintext value is provided by the user. + key_vault_versioned_reference = "keyVaultVersionedReference" #: A secret resource that references a specific version of a secret stored in Azure Key Vault; the expected value is a versioned KeyVault URI corresponding to the version of the secret being referenced. + + +class VolumeProvider(str, Enum): + + sf_azure_file = "SFAzureFile" #: Provides volumes that are backed by Azure Files. + + +class SizeTypes(str, Enum): + + small = "Small" + medium = "Medium" + large = "Large" + + +class ApplicationScopedVolumeKind(str, Enum): + + service_fabric_volume_disk = "ServiceFabricVolumeDisk" #: Provides Service Fabric High Availability Volume Disk + + +class NetworkKind(str, Enum): + + local = "Local" #: Indicates a container network local to a single Service Fabric cluster. The value is 1. + + +class HeaderMatchType(str, Enum): + + exact = "exact" + + +class OperatingSystemType(str, Enum): + + linux = "Linux" #: The required operating system is Linux. + windows = "Windows" #: The required operating system is Windows. 
+ + +class ImageRegistryPasswordType(str, Enum): + + clear_text = "ClearText" #: The image registry password in clear text, will not be processed in any way and used directly + key_vault_reference = "KeyVaultReference" #: The URI to a KeyVault secret version, will be resolved using the application's managed identity (this type is only valid if the app was assigned a managed identity) before getting used + secret_value_reference = "SecretValueReference" #: The reference to a SecretValue resource, will be resolved before getting used + + +class EnvironmentVariableType(str, Enum): + + clear_text = "ClearText" #: The environment variable in clear text, will not be processed in any way and passed in as is + key_vault_reference = "KeyVaultReference" #: The URI to a KeyVault secret version, will be resolved using the application's managed identity (this type is only valid if the app was assigned a managed identity) before getting passed in + secret_value_reference = "SecretValueReference" #: The reference to a SecretValue resource, will be resolved before getting passed in + + +class SettingType(str, Enum): + + clear_text = "ClearText" #: The setting in clear text, will not be processed in any way and passed in as is + key_vault_reference = "KeyVaultReference" #: The URI to a KeyVault secret version, will be resolved using the application's managed identity (this type is only valid if the app was assigned a managed identity) before getting passed in + secret_value_reference = "SecretValueReference" #: The reference to a SecretValue resource, will be resolved before getting passed in + + +class Scheme(str, Enum): + + http = "http" #: Indicates that the probe is http. + https = "https" #: Indicates that the probe is https. No cert validation. + + +class ApplicationResourceUpgradeState(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is 0. 
+ provisioning_target = "ProvisioningTarget" #: The upgrade is in the progress of provisioning target application type version. The value is 1. + rolling_forward = "RollingForward" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. + unprovisioning_current = "UnprovisioningCurrent" #: The upgrade is in the progress of unprovisioning current application type version and rolling forward to the target version is completed. The value is 3. + completed_rollforward = "CompletedRollforward" #: The upgrade has finished rolling forward. The value is 4. + rolling_back = "RollingBack" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 5. + unprovisioning_target = "UnprovisioningTarget" #: The upgrade is in the progress of unprovisioning target application type version and rolling back to the current version is completed. The value is 6. + completed_rollback = "CompletedRollback" #: The upgrade has finished rolling back. The value is 7. + failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 8. + + +class RollingUpgradeMode(str, Enum): + + invalid = "Invalid" #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + unmonitored_auto = "UnmonitoredAuto" #: The upgrade will proceed automatically without performing any health monitoring. The value is 1 + unmonitored_manual = "UnmonitoredManual" #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually monitor health before proceeding. The value is 2 + monitored = "Monitored" #: The upgrade will stop after completing each upgrade domain and automatically monitor health before proceeding. The value is 3 + + +class DiagnosticsSinkKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid sink kind. All Service Fabric enumerations have the invalid type. 
+ azure_internal_monitoring_pipeline = "AzureInternalMonitoringPipeline" #: Diagnostics settings for Geneva. + + +class AutoScalingMechanismKind(str, Enum): + + add_remove_replica = "AddRemoveReplica" #: Indicates that scaling should be performed by adding or removing replicas. + + +class AutoScalingMetricKind(str, Enum): + + resource = "Resource" #: Indicates that the metric is one of resources, like cpu or memory. + + +class AutoScalingResourceMetricName(str, Enum): + + cpu = "cpu" #: Indicates that the resource is CPU cores. + memory_in_gb = "memoryInGB" #: Indicates that the resource is memory in GB. + + +class AutoScalingTriggerKind(str, Enum): + + average_load = "AverageLoad" #: Indicates that scaling should be performed based on average load of all replicas in the service. + + +class ExecutionPolicyType(str, Enum): + + default = "Default" #: Indicates the default execution policy, always restart the service if an exit occurs. + run_to_completion = "RunToCompletion" #: Indicates that the service will perform its desired operation and complete successfully. If the service encounters failure, it will restarted based on restart policy specified. If the service completes its operation successfully, it will not be restarted again. + + +class RestartPolicy(str, Enum): + + on_failure = "OnFailure" #: Service will be restarted when it encounters a failure. + never = "Never" #: Service will never be restarted. If the service encounters a failure, it will move to Failed state. + + +class NodeStatusFilter(str, Enum): + + default = "default" #: This filter value will match all of the nodes excepts the ones with status as Unknown or Removed. + all = "all" #: This filter value will match all of the nodes. + up = "up" #: This filter value will match nodes that are Up. + down = "down" #: This filter value will match nodes that are Down. + enabling = "enabling" #: This filter value will match nodes that are in the process of being enabled with status as Enabling. 
+ disabling = "disabling" #: This filter value will match nodes that are in the process of being disabled with status as Disabling. + disabled = "disabled" #: This filter value will match nodes that are Disabled. + unknown = "unknown" #: This filter value will match nodes whose status is Unknown. A node would be in Unknown state if Service Fabric does not have authoritative information about that node. This can happen if the system learns about a node at runtime. + removed = "removed" #: This filter value will match nodes whose status is Removed. These are the nodes that are removed from the cluster using the RemoveNodeState API. + + +class ReplicaHealthReportServiceKind(str, Enum): + + stateless = "Stateless" #: Does not use Service Fabric to make its state highly available or reliable. The value is 1 + stateful = "Stateful" #: Uses Service Fabric to make its state or part of its state highly available and reliable. The value is 2. + + +class DataLossMode(str, Enum): + + invalid = "Invalid" #: Reserved. Do not pass into API. + partial_data_loss = "PartialDataLoss" #: PartialDataLoss option will cause a quorum of replicas to go down, triggering an OnDataLoss event in the system for the given partition. + full_data_loss = "FullDataLoss" #: FullDataLoss option will drop all the replicas which means that all the data will be lost. + + +class NodeTransitionType(str, Enum): + + invalid = "Invalid" #: Reserved. Do not pass into API. + start = "Start" #: Transition a stopped node to up. + stop = "Stop" #: Transition an up node to stopped. + + +class QuorumLossMode(str, Enum): + + invalid = "Invalid" #: Reserved. Do not pass into API. + quorum_replicas = "QuorumReplicas" #: Partial Quorum loss mode : Minimum number of replicas for a partition will be down that will cause a quorum loss. + all_replicas = "AllReplicas" + + +class RestartPartitionMode(str, Enum): + + invalid = "Invalid" #: Reserved. Do not pass into API. 
+ all_replicas_or_instances = "AllReplicasOrInstances" #: All replicas or instances in the partition are restarted at once. + only_active_secondaries = "OnlyActiveSecondaries" #: Only the secondary replicas are restarted. diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py deleted file mode 100644 index 6507720047ec..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/models/_service_fabric_client_apis_enums.py +++ /dev/null @@ -1,2092 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class ApplicationDefinitionKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The mechanism used to define a Service Fabric application. - """ - - #: Indicates the application definition kind is invalid. All Service Fabric enumerations have the - #: invalid type. 
The value is 65535. - INVALID = "Invalid" - #: Indicates the application is defined by a Service Fabric application description. The value is - #: 0. - SERVICE_FABRIC_APPLICATION_DESCRIPTION = "ServiceFabricApplicationDescription" - #: Indicates the application is defined by compose file(s). The value is 1. - COMPOSE = "Compose" - -class ApplicationPackageCleanupPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of action that needs to be taken for cleaning up the application package after - successful provision. - """ - - #: Indicates that the application package cleanup policy is invalid. This value is default. The - #: value is zero. - INVALID = "Invalid" - #: Indicates that the cleanup policy of application packages is based on the cluster setting - #: "CleanupApplicationPackageOnProvisionSuccess." The value is 1. - DEFAULT = "Default" - #: Indicates that the service fabric runtime determines when to do the application package - #: cleanup. By default, cleanup is done on successful provision. The value is 2. - AUTOMATIC = "Automatic" - #: Indicates that the user has to explicitly clean up the application package. The value is 3. - MANUAL = "Manual" - -class ApplicationResourceUpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The state of the application resource upgrade. - """ - - #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. - #: The value is 0. - INVALID = "Invalid" - #: The upgrade is in the progress of provisioning target application type version. The value is 1. - PROVISIONING_TARGET = "ProvisioningTarget" - #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. - ROLLING_FORWARD = "RollingForward" - #: The upgrade is in the progress of unprovisioning current application type version and rolling - #: forward to the target version is completed. The value is 3. 
- UNPROVISIONING_CURRENT = "UnprovisioningCurrent" - #: The upgrade has finished rolling forward. The value is 4. - COMPLETED_ROLLFORWARD = "CompletedRollforward" - #: The upgrade is rolling back to the previous version but is not complete yet. The value is 5. - ROLLING_BACK = "RollingBack" - #: The upgrade is in the progress of unprovisioning target application type version and rolling - #: back to the current version is completed. The value is 6. - UNPROVISIONING_TARGET = "UnprovisioningTarget" - #: The upgrade has finished rolling back. The value is 7. - COMPLETED_ROLLBACK = "CompletedRollback" - #: The upgrade has failed and is unable to execute FailureAction. The value is 8. - FAILED = "Failed" - -class ApplicationScopedVolumeKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the application-scoped volume kind. - """ - - #: Provides Service Fabric High Availability Volume Disk. - SERVICE_FABRIC_VOLUME_DISK = "ServiceFabricVolumeDisk" - -class ApplicationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the application. - """ - - #: Indicates the application status is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates the application status is ready. The value is 1. - READY = "Ready" - #: Indicates the application status is upgrading. The value is 2. - UPGRADING = "Upgrading" - #: Indicates the application status is creating. The value is 3. - CREATING = "Creating" - #: Indicates the application status is deleting. The value is 4. - DELETING = "Deleting" - #: Indicates the creation or deletion of application was terminated due to persistent failures. - #: Another create/delete request can be accepted to resume a failed application. The value is 5. - FAILED = "Failed" - -class ApplicationTypeDefinitionKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The mechanism used to define a Service Fabric application type. 
- """ - - #: Indicates the application type definition kind is invalid. All Service Fabric enumerations have - #: the invalid type. The value is 0. - INVALID = "Invalid" - #: Indicates the application type is defined and created by a Service Fabric application package - #: provided by the user. The value is 1. - SERVICE_FABRIC_APPLICATION_PACKAGE = "ServiceFabricApplicationPackage" - #: Indicates the application type is defined and created implicitly as part of a compose - #: deployment. The value is 2. - COMPOSE = "Compose" - -class ApplicationTypeStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the application type. - """ - - #: Indicates the application type status is invalid. All Service Fabric enumerations have the - #: invalid type. The value is zero. - INVALID = "Invalid" - #: Indicates that the application type is being provisioned in the cluster. The value is 1. - PROVISIONING = "Provisioning" - #: Indicates that the application type is fully provisioned and is available for use. An - #: application of this type and version can be created. The value is 2. - AVAILABLE = "Available" - #: Indicates that the application type is in process of being unprovisioned from the cluster. The - #: value is 3. - UNPROVISIONING = "Unprovisioning" - #: Indicates that the application type provisioning failed and it is unavailable for use. The - #: failure details can be obtained from the application type information query. The failed - #: application type information remains in the cluster until it is unprovisioned or reprovisioned - #: successfully. The value is 4. - FAILED = "Failed" - -class AutoScalingMechanismKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the mechanisms for auto scaling. - """ - - #: Indicates that scaling should be performed by adding or removing replicas. 
- ADD_REMOVE_REPLICA = "AddRemoveReplica" - -class AutoScalingMetricKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the metrics that are used for triggering auto scaling. - """ - - #: Indicates that the metric is one of resources, like cpu or memory. - RESOURCE = "Resource" - -class AutoScalingResourceMetricName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the resources that are used for triggering auto scaling. - """ - - #: Indicates that the resource is CPU cores. - CPU = "cpu" - #: Indicates that the resource is memory in GB. - MEMORY_IN_GB = "memoryInGB" - -class AutoScalingTriggerKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the triggers for auto scaling. - """ - - #: Indicates that scaling should be performed based on average load of all replicas in the - #: service. - AVERAGE_LOAD = "AverageLoad" - -class BackupEntityKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The entity type of a Service Fabric entity such as Application, Service or a Partition where - periodic backups can be enabled. - """ - - #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Indicates the entity is a Service Fabric partition. - PARTITION = "Partition" - #: Indicates the entity is a Service Fabric service. - SERVICE = "Service" - #: Indicates the entity is a Service Fabric application. - APPLICATION = "Application" - -class BackupPolicyScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the scope at which the backup policy is applied. - """ - - #: Indicates an invalid backup policy scope type. All Service Fabric enumerations have the invalid - #: type. - INVALID = "Invalid" - #: Indicates the backup policy is applied at partition level. Hence overriding any policy which - #: may have applied at partition's service or application level. 
- PARTITION = "Partition" - #: Indicates the backup policy is applied at service level. All partitions of the service inherit - #: this policy unless explicitly overridden at partition level. - SERVICE = "Service" - #: Indicates the backup policy is applied at application level. All services and partitions of the - #: application inherit this policy unless explicitly overridden at service or partition level. - APPLICATION = "Application" - -class BackupScheduleFrequencyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the frequency with which to run the time based backup schedule. - """ - - #: Indicates an invalid backup schedule frequency type. All Service Fabric enumerations have the - #: invalid type. - INVALID = "Invalid" - #: Indicates that the time based backup schedule is repeated at a daily frequency. - DAILY = "Daily" - #: Indicates that the time based backup schedule is repeated at a weekly frequency. - WEEKLY = "Weekly" - -class BackupScheduleKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of backup schedule, time based or frequency based. - """ - - #: Indicates an invalid backup schedule kind. All Service Fabric enumerations have the invalid - #: type. - INVALID = "Invalid" - #: Indicates a time-based backup schedule. - TIME_BASED = "TimeBased" - #: Indicates a frequency-based backup schedule. - FREQUENCY_BASED = "FrequencyBased" - -class BackupState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Represents the current state of the partition backup operation. - """ - - #: Indicates an invalid backup state. All Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Operation has been validated and accepted. Backup is yet to be triggered. - ACCEPTED = "Accepted" - #: Backup operation has been triggered and is under process. - BACKUP_IN_PROGRESS = "BackupInProgress" - #: Operation completed with success. - SUCCESS = "Success" - #: Operation completed with failure. 
- FAILURE = "Failure" - #: Operation timed out. - TIMEOUT = "Timeout" - -class BackupStorageKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of backup storage, where backups are saved. - """ - - #: Indicates an invalid backup storage kind. All Service Fabric enumerations have the invalid - #: type. - INVALID = "Invalid" - #: Indicates file/ SMB share to be used as backup storage. - FILE_SHARE = "FileShare" - #: Indicates Azure blob store to be used as backup storage. - AZURE_BLOB_STORE = "AzureBlobStore" - #: Indicates Dsms Azure blob store to be used as backup storage. - DSMS_AZURE_BLOB_STORE = "DsmsAzureBlobStore" - #: Indicates Azure blob store to be used as backup storage using managed identity. - MANAGED_IDENTITY_AZURE_BLOB_STORE = "ManagedIdentityAzureBlobStore" - -class BackupSuspensionScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the scope at which the backup suspension was applied. - """ - - #: Indicates an invalid backup suspension scope type also indicating entity is not suspended. All - #: Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Indicates the backup suspension is applied at partition level. - PARTITION = "Partition" - #: Indicates the backup suspension is applied at service level. All partitions of the service are - #: hence suspended for backup. - SERVICE = "Service" - #: Indicates the backup suspension is applied at application level. All services and partitions of - #: the application are hence suspended for backup. - APPLICATION = "Application" - -class BackupType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the type of backup, whether its full or incremental. - """ - - #: Indicates an invalid backup type. All Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Indicates a full backup. - FULL = "Full" - #: Indicates an incremental backup. 
A backup chain is comprised of a full backup followed by 0 or - #: more incremental backups. - INCREMENTAL = "Incremental" - -class ChaosEventKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of Chaos event. - """ - - #: Indicates an invalid Chaos event kind. All Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Indicates a Chaos event that gets generated when Chaos is started. - STARTED = "Started" - #: Indicates a Chaos event that gets generated when Chaos has decided on the faults for an - #: iteration. This Chaos event contains the details of the faults as a list of strings. - EXECUTING_FAULTS = "ExecutingFaults" - #: Indicates a Chaos event that gets generated when Chaos is waiting for the cluster to become - #: ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. - WAITING = "Waiting" - #: Indicates a Chaos event that gets generated when the cluster entities do not become stable and - #: healthy within ChaosParameters.MaxClusterStabilizationTimeoutInSeconds. - VALIDATION_FAILED = "ValidationFailed" - #: Indicates a Chaos event that gets generated when an unexpected event has occurred in the Chaos - #: engine, for example, due to the cluster snapshot being inconsistent, while faulting a faultable - #: entity Chaos found that the entity was already faulted. - TEST_ERROR = "TestError" - #: Indicates a Chaos event that gets generated when Chaos stops because either the user issued a - #: stop or the time to run was up. - STOPPED = "Stopped" - -class ChaosScheduleStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Current status of the schedule. - """ - - #: Indicates an invalid Chaos Schedule status. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates that the schedule is stopped and not being used to schedule runs of chaos. The value - #: is one. 
- STOPPED = "Stopped" - #: Indicates that the schedule is active and is being used to schedule runs of Chaos. The value is - #: two. - ACTIVE = "Active" - #: Indicates that the schedule is expired and will no longer be used to schedule runs of Chaos. - #: The value is three. - EXPIRED = "Expired" - #: Indicates that the schedule is pending and is not yet being used to schedule runs of Chaos but - #: will be used when the start time is passed. The value is four. - PENDING = "Pending" - -class ChaosStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Current status of the Chaos run. - """ - - #: Indicates an invalid Chaos status. All Service Fabric enumerations have the invalid type. The - #: value is zero. - INVALID = "Invalid" - #: Indicates that Chaos is not stopped. The value is one. - RUNNING = "Running" - #: Indicates that Chaos is not scheduling further faults. The value is two. - STOPPED = "Stopped" - -class ComposeDeploymentStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the compose deployment. - """ - - #: Indicates that the compose deployment status is invalid. The value is zero. - INVALID = "Invalid" - #: Indicates that the compose deployment is being provisioned in background. The value is 1. - PROVISIONING = "Provisioning" - #: Indicates that the compose deployment is being created in background. The value is 2. - CREATING = "Creating" - #: Indicates that the compose deployment has been successfully created or upgraded. The value is - #: 3. - READY = "Ready" - #: Indicates that the compose deployment is being unprovisioned in background. The value is 4. - UNPROVISIONING = "Unprovisioning" - #: Indicates that the compose deployment is being deleted in background. The value is 5. - DELETING = "Deleting" - #: Indicates that the compose deployment was terminated due to persistent failures. The value is - #: 6. - FAILED = "Failed" - #: Indicates that the compose deployment is being upgraded in the background. 
The value is 7. - UPGRADING = "Upgrading" - -class ComposeDeploymentUpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The state of the compose deployment upgrade. - """ - - #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade is in the progress of provisioning target application type version. The value is 1. - PROVISIONING_TARGET = "ProvisioningTarget" - #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2. - ROLLING_FORWARD_IN_PROGRESS = "RollingForwardInProgress" - #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an - #: explicit move next request in UnmonitoredManual mode or performing health checks in Monitored - #: mode. The value is 3. - ROLLING_FORWARD_PENDING = "RollingForwardPending" - #: The upgrade is in the progress of unprovisioning current application type version and rolling - #: forward to the target version is completed. The value is 4. - UNPROVISIONING_CURRENT = "UnprovisioningCurrent" - #: The upgrade has finished rolling forward. The value is 5. - ROLLING_FORWARD_COMPLETED = "RollingForwardCompleted" - #: The upgrade is rolling back to the previous version but is not complete yet. The value is 6. - ROLLING_BACK_IN_PROGRESS = "RollingBackInProgress" - #: The upgrade is in the progress of unprovisioning target application type version and rolling - #: back to the current version is completed. The value is 7. - UNPROVISIONING_TARGET = "UnprovisioningTarget" - #: The upgrade has finished rolling back. The value is 8. - ROLLING_BACK_COMPLETED = "RollingBackCompleted" - #: The upgrade has failed and is unable to execute FailureAction. The value is 9. - FAILED = "Failed" - -class CreateFabricDump(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specify True to create a dump of the fabric node process. This is case-sensitive. 
- """ - - FALSE = "False" - TRUE = "True" - -class DataLossMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - #: Reserved. Do not pass into API. - INVALID = "Invalid" - #: PartialDataLoss option will cause a quorum of replicas to go down, triggering an OnDataLoss - #: event in the system for the given partition. - PARTIAL_DATA_LOSS = "PartialDataLoss" - #: FullDataLoss option will drop all the replicas which means that all the data will be lost. - FULL_DATA_LOSS = "FullDataLoss" - -class DayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the days in a week. - """ - - #: Indicates the Day referred is Sunday. - SUNDAY = "Sunday" - #: Indicates the Day referred is Monday. - MONDAY = "Monday" - #: Indicates the Day referred is Tuesday. - TUESDAY = "Tuesday" - #: Indicates the Day referred is Wednesday. - WEDNESDAY = "Wednesday" - #: Indicates the Day referred is Thursday. - THURSDAY = "Thursday" - #: Indicates the Day referred is Friday. - FRIDAY = "Friday" - #: Indicates the Day referred is Saturday. - SATURDAY = "Saturday" - -class DeactivationIntent(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the intent or reason for deactivating the node. The possible values are following. - """ - - #: Indicates that the node should be paused. The value is 1. - PAUSE = "Pause" - #: Indicates that the intent is for the node to be restarted after a short period of time. The - #: value is 2. - RESTART = "Restart" - #: Indicates the intent is for the node to remove data. The value is 3. - REMOVE_DATA = "RemoveData" - -class DeployedApplicationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the application deployed on the node. Following are the possible values. - """ - - #: Indicates that deployment status is not valid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates that the package is downloading from the ImageStore. 
The value is 1. - DOWNLOADING = "Downloading" - #: Indicates that the package is activating. The value is 2. - ACTIVATING = "Activating" - #: Indicates that the package is active. The value is 3. - ACTIVE = "Active" - #: Indicates that the package is upgrading. The value is 4. - UPGRADING = "Upgrading" - #: Indicates that the package is deactivating. The value is 5. - DEACTIVATING = "Deactivating" - -class DeploymentStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the status of a deployed application or service package on a Service Fabric node. - """ - - #: Indicates status of the application or service package is not known or invalid. The value is 0. - INVALID = "Invalid" - #: Indicates the application or service package is being downloaded to the node from the - #: ImageStore. The value is 1. - DOWNLOADING = "Downloading" - #: Indicates the application or service package is being activated. The value is 2. - ACTIVATING = "Activating" - #: Indicates the application or service package is active the node. The value is 3. - ACTIVE = "Active" - #: Indicates the application or service package is being upgraded. The value is 4. - UPGRADING = "Upgrading" - #: Indicates the application or service package is being deactivated. The value is 5. - DEACTIVATING = "Deactivating" - #: Indicates the application or service package has ran to completion successfully. The value is - #: 6. - RAN_TO_COMPLETION = "RanToCompletion" - #: Indicates the application or service package has failed to run to completion. The value is 7. - FAILED = "Failed" - -class DiagnosticsSinkKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of DiagnosticsSink. - """ - - #: Indicates an invalid sink kind. All Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Diagnostics settings for Geneva. 
- AZURE_INTERNAL_MONITORING_PIPELINE = "AzureInternalMonitoringPipeline" - -class EntityKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The entity type of a Service Fabric entity such as Cluster, Node, Application, Service, - Partition, Replica etc. - """ - - #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. The - #: value is zero. - INVALID = "Invalid" - #: Indicates the entity is a Service Fabric node. The value is 1. - NODE = "Node" - #: Indicates the entity is a Service Fabric partition. The value is 2. - PARTITION = "Partition" - #: Indicates the entity is a Service Fabric service. The value is 3. - SERVICE = "Service" - #: Indicates the entity is a Service Fabric application. The value is 4. - APPLICATION = "Application" - #: Indicates the entity is a Service Fabric replica. The value is 5. - REPLICA = "Replica" - #: Indicates the entity is a Service Fabric deployed application. The value is 6. - DEPLOYED_APPLICATION = "DeployedApplication" - #: Indicates the entity is a Service Fabric deployed service package. The value is 7. - DEPLOYED_SERVICE_PACKAGE = "DeployedServicePackage" - #: Indicates the entity is a Service Fabric cluster. The value is 8. - CLUSTER = "Cluster" - -class EntryPointStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the status of the code package entry point deployed on a Service Fabric node. - """ - - #: Indicates status of entry point is not known or invalid. The value is 0. - INVALID = "Invalid" - #: Indicates the entry point is scheduled to be started. The value is 1. - PENDING = "Pending" - #: Indicates the entry point is being started. The value is 2. - STARTING = "Starting" - #: Indicates the entry point was started successfully and is running. The value is 3. - STARTED = "Started" - #: Indicates the entry point is being stopped. The value is 4. - STOPPING = "Stopping" - #: Indicates the entry point is not running. The value is 5. 
- STOPPED = "Stopped" - -class EnvironmentVariableType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of the environment variable being given in value - """ - - #: The environment variable in clear text, will not be processed in any way and passed in as is. - CLEAR_TEXT = "ClearText" - #: The URI to a KeyVault secret version, will be resolved using the application's managed identity - #: (this type is only valid if the app was assigned a managed identity) before getting passed in. - KEY_VAULT_REFERENCE = "KeyVaultReference" - #: The reference to a SecretValue resource, will be resolved before getting passed in. - SECRET_VALUE_REFERENCE = "SecretValueReference" - -class ExecutionPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the execution policy types for services. - """ - - #: Indicates the default execution policy, always restart the service if an exit occurs. - DEFAULT = "Default" - #: Indicates that the service will perform its desired operation and complete successfully. If the - #: service encounters failure, it will restarted based on restart policy specified. If the service - #: completes its operation successfully, it will not be restarted again. - RUN_TO_COMPLETION = "RunToCompletion" - -class FabricErrorCodes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Defines the fabric error codes that be returned as part of the error object in response to - Service Fabric API operations that are not successful. Following are the error code values that - can be returned for a specific HTTP status code. 
- - - * - Possible values of the error code for HTTP status code 400 (Bad Request) - - - * "FABRIC_E_INVALID_PARTITION_KEY" - * "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - * "FABRIC_E_INVALID_ADDRESS" - * "FABRIC_E_APPLICATION_NOT_UPGRADING" - * "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - * "FABRIC_E_FABRIC_NOT_UPGRADING" - * "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - * "FABRIC_E_INVALID_CONFIGURATION" - * "FABRIC_E_INVALID_NAME_URI" - * "FABRIC_E_PATH_TOO_LONG" - * "FABRIC_E_KEY_TOO_LARGE" - * "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - * "FABRIC_E_INVALID_ATOMIC_GROUP" - * "FABRIC_E_VALUE_EMPTY" - * "FABRIC_E_BACKUP_IS_ENABLED" - * "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - * "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - * "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - * "E_INVALIDARG" - - * - Possible values of the error code for HTTP status code 404 (Not Found) - - - * "FABRIC_E_NODE_NOT_FOUND" - * "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - * "FABRIC_E_APPLICATION_NOT_FOUND" - * "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - * "FABRIC_E_SERVICE_DOES_NOT_EXIST" - * "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - * "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - * "FABRIC_E_PARTITION_NOT_FOUND" - * "FABRIC_E_REPLICA_DOES_NOT_EXIST" - * "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - * "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - * "FABRIC_E_DIRECTORY_NOT_FOUND" - * "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - * "FABRIC_E_FILE_NOT_FOUND" - * "FABRIC_E_NAME_DOES_NOT_EXIST" - * "FABRIC_E_PROPERTY_DOES_NOT_EXIST" - * "FABRIC_E_ENUMERATION_COMPLETED" - * "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - * "FABRIC_E_KEY_NOT_FOUND" - * "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - * "FABRIC_E_BACKUP_NOT_ENABLED" - * "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - * "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - * "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - - * - Possible values of the error code for HTTP status code 409 (Conflict) - - - * "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - * 
"FABRIC_E_APPLICATION_ALREADY_EXISTS" - * "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - * "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - * "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - * "FABRIC_E_SERVICE_ALREADY_EXISTS" - * "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - * "FABRIC_E_APPLICATION_TYPE_IN_USE" - * "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - * "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - * "FABRIC_E_FABRIC_VERSION_IN_USE" - * "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - * "FABRIC_E_NAME_ALREADY_EXISTS" - * "FABRIC_E_NAME_NOT_EMPTY" - * "FABRIC_E_PROPERTY_CHECK_FAILED" - * "FABRIC_E_SERVICE_METADATA_MISMATCH" - * "FABRIC_E_SERVICE_TYPE_MISMATCH" - * "FABRIC_E_HEALTH_STALE_REPORT" - * "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - * "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - * "FABRIC_E_INSTANCE_ID_MISMATCH" - * "FABRIC_E_BACKUP_IN_PROGRESS" - * "FABRIC_E_RESTORE_IN_PROGRESS" - * "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - - * - Possible values of the error code for HTTP status code 413 (Request Entity Too Large) - - - * "FABRIC_E_VALUE_TOO_LARGE" - - * - Possible values of the error code for HTTP status code 500 (Internal Server Error) - - - * "FABRIC_E_NODE_IS_UP" - * "E_FAIL" - * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - * "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - * "FABRIC_E_VOLUME_ALREADY_EXISTS" - * "FABRIC_E_VOLUME_NOT_FOUND" - * "SerializationError" - - * - Possible values of the error code for HTTP status code 503 (Service Unavailable) - - - * "FABRIC_E_NO_WRITE_QUORUM" - * "FABRIC_E_NOT_PRIMARY" - * "FABRIC_E_NOT_READY" - * "FABRIC_E_RECONFIGURATION_PENDING" - * "FABRIC_E_SERVICE_OFFLINE" - * "E_ABORT" - * "FABRIC_E_VALUE_TOO_LARGE" - - * - Possible values of the error code for HTTP status code 504 (Gateway Timeout) - - - * "FABRIC_E_COMMUNICATION_ERROR" - * "FABRIC_E_OPERATION_NOT_COMPLETE" - * "FABRIC_E_TIMEOUT" - """ - - FABRIC_E_INVALID_PARTITION_KEY = "FABRIC_E_INVALID_PARTITION_KEY" - FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR = 
"FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" - FABRIC_E_INVALID_ADDRESS = "FABRIC_E_INVALID_ADDRESS" - FABRIC_E_APPLICATION_NOT_UPGRADING = "FABRIC_E_APPLICATION_NOT_UPGRADING" - FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR = "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" - FABRIC_E_FABRIC_NOT_UPGRADING = "FABRIC_E_FABRIC_NOT_UPGRADING" - FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR = "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" - FABRIC_E_INVALID_CONFIGURATION = "FABRIC_E_INVALID_CONFIGURATION" - FABRIC_E_INVALID_NAME_URI = "FABRIC_E_INVALID_NAME_URI" - FABRIC_E_PATH_TOO_LONG = "FABRIC_E_PATH_TOO_LONG" - FABRIC_E_KEY_TOO_LARGE = "FABRIC_E_KEY_TOO_LARGE" - FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED = "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - FABRIC_E_INVALID_ATOMIC_GROUP = "FABRIC_E_INVALID_ATOMIC_GROUP" - FABRIC_E_VALUE_EMPTY = "FABRIC_E_VALUE_EMPTY" - FABRIC_E_NODE_NOT_FOUND = "FABRIC_E_NODE_NOT_FOUND" - FABRIC_E_APPLICATION_TYPE_NOT_FOUND = "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" - FABRIC_E_APPLICATION_NOT_FOUND = "FABRIC_E_APPLICATION_NOT_FOUND" - FABRIC_E_SERVICE_TYPE_NOT_FOUND = "FABRIC_E_SERVICE_TYPE_NOT_FOUND" - FABRIC_E_SERVICE_DOES_NOT_EXIST = "FABRIC_E_SERVICE_DOES_NOT_EXIST" - FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND = "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" - FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND = "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" - FABRIC_E_PARTITION_NOT_FOUND = "FABRIC_E_PARTITION_NOT_FOUND" - FABRIC_E_REPLICA_DOES_NOT_EXIST = "FABRIC_E_REPLICA_DOES_NOT_EXIST" - FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST = "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" - FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND = "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" - FABRIC_E_DIRECTORY_NOT_FOUND = "FABRIC_E_DIRECTORY_NOT_FOUND" - FABRIC_E_FABRIC_VERSION_NOT_FOUND = "FABRIC_E_FABRIC_VERSION_NOT_FOUND" - FABRIC_E_FILE_NOT_FOUND = "FABRIC_E_FILE_NOT_FOUND" - FABRIC_E_NAME_DOES_NOT_EXIST = "FABRIC_E_NAME_DOES_NOT_EXIST" - FABRIC_E_PROPERTY_DOES_NOT_EXIST = 
"FABRIC_E_PROPERTY_DOES_NOT_EXIST" - FABRIC_E_ENUMERATION_COMPLETED = "FABRIC_E_ENUMERATION_COMPLETED" - FABRIC_E_SERVICE_MANIFEST_NOT_FOUND = "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - FABRIC_E_KEY_NOT_FOUND = "FABRIC_E_KEY_NOT_FOUND" - FABRIC_E_HEALTH_ENTITY_NOT_FOUND = "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" - FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS = "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - FABRIC_E_APPLICATION_ALREADY_EXISTS = "FABRIC_E_APPLICATION_ALREADY_EXISTS" - FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION = "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" - FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS = "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" - FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS = "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" - FABRIC_E_SERVICE_ALREADY_EXISTS = "FABRIC_E_SERVICE_ALREADY_EXISTS" - FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS = "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" - FABRIC_E_APPLICATION_TYPE_IN_USE = "FABRIC_E_APPLICATION_TYPE_IN_USE" - FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION = "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" - FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS = "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" - FABRIC_E_FABRIC_VERSION_IN_USE = "FABRIC_E_FABRIC_VERSION_IN_USE" - FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS = "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" - FABRIC_E_NAME_ALREADY_EXISTS = "FABRIC_E_NAME_ALREADY_EXISTS" - FABRIC_E_NAME_NOT_EMPTY = "FABRIC_E_NAME_NOT_EMPTY" - FABRIC_E_PROPERTY_CHECK_FAILED = "FABRIC_E_PROPERTY_CHECK_FAILED" - FABRIC_E_SERVICE_METADATA_MISMATCH = "FABRIC_E_SERVICE_METADATA_MISMATCH" - FABRIC_E_SERVICE_TYPE_MISMATCH = "FABRIC_E_SERVICE_TYPE_MISMATCH" - FABRIC_E_HEALTH_STALE_REPORT = "FABRIC_E_HEALTH_STALE_REPORT" - FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED = "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - FABRIC_E_NODE_HAS_NOT_STOPPED_YET = "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - FABRIC_E_INSTANCE_ID_MISMATCH = "FABRIC_E_INSTANCE_ID_MISMATCH" - FABRIC_E_VALUE_TOO_LARGE = "FABRIC_E_VALUE_TOO_LARGE" - 
FABRIC_E_NO_WRITE_QUORUM = "FABRIC_E_NO_WRITE_QUORUM" - FABRIC_E_NOT_PRIMARY = "FABRIC_E_NOT_PRIMARY" - FABRIC_E_NOT_READY = "FABRIC_E_NOT_READY" - FABRIC_E_RECONFIGURATION_PENDING = "FABRIC_E_RECONFIGURATION_PENDING" - FABRIC_E_SERVICE_OFFLINE = "FABRIC_E_SERVICE_OFFLINE" - E_ABORT = "E_ABORT" - FABRIC_E_COMMUNICATION_ERROR = "FABRIC_E_COMMUNICATION_ERROR" - FABRIC_E_OPERATION_NOT_COMPLETE = "FABRIC_E_OPERATION_NOT_COMPLETE" - FABRIC_E_TIMEOUT = "FABRIC_E_TIMEOUT" - FABRIC_E_NODE_IS_UP = "FABRIC_E_NODE_IS_UP" - E_FAIL = "E_FAIL" - FABRIC_E_BACKUP_IS_ENABLED = "FABRIC_E_BACKUP_IS_ENABLED" - FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH = "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" - FABRIC_E_INVALID_FOR_STATELESS_SERVICES = "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" - FABRIC_E_BACKUP_NOT_ENABLED = "FABRIC_E_BACKUP_NOT_ENABLED" - FABRIC_E_BACKUP_POLICY_NOT_EXISTING = "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" - FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING = "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - FABRIC_E_BACKUP_IN_PROGRESS = "FABRIC_E_BACKUP_IN_PROGRESS" - FABRIC_E_RESTORE_IN_PROGRESS = "FABRIC_E_RESTORE_IN_PROGRESS" - FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING = "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - FABRIC_E_INVALID_SERVICE_SCALING_POLICY = "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" - E_INVALIDARG = "E_INVALIDARG" - FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_ALREADY_EXISTS" - FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND = "FABRIC_E_SINGLE_INSTANCE_APPLICATION_NOT_FOUND" - FABRIC_E_VOLUME_ALREADY_EXISTS = "FABRIC_E_VOLUME_ALREADY_EXISTS" - FABRIC_E_VOLUME_NOT_FOUND = "FABRIC_E_VOLUME_NOT_FOUND" - SERIALIZATION_ERROR = "SerializationError" - FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR = "FABRIC_E_IMAGEBUILDER_RESERVED_DIRECTORY_ERROR" - -class FabricEventKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of FabricEvent. 
- """ - - CLUSTER_EVENT = "ClusterEvent" - CONTAINER_INSTANCE_EVENT = "ContainerInstanceEvent" - NODE_EVENT = "NodeEvent" - APPLICATION_EVENT = "ApplicationEvent" - SERVICE_EVENT = "ServiceEvent" - PARTITION_EVENT = "PartitionEvent" - REPLICA_EVENT = "ReplicaEvent" - PARTITION_ANALYSIS_EVENT = "PartitionAnalysisEvent" - APPLICATION_CREATED = "ApplicationCreated" - APPLICATION_DELETED = "ApplicationDeleted" - APPLICATION_NEW_HEALTH_REPORT = "ApplicationNewHealthReport" - APPLICATION_HEALTH_REPORT_EXPIRED = "ApplicationHealthReportExpired" - APPLICATION_UPGRADE_COMPLETED = "ApplicationUpgradeCompleted" - APPLICATION_UPGRADE_DOMAIN_COMPLETED = "ApplicationUpgradeDomainCompleted" - APPLICATION_UPGRADE_ROLLBACK_COMPLETED = "ApplicationUpgradeRollbackCompleted" - APPLICATION_UPGRADE_ROLLBACK_STARTED = "ApplicationUpgradeRollbackStarted" - APPLICATION_UPGRADE_STARTED = "ApplicationUpgradeStarted" - DEPLOYED_APPLICATION_NEW_HEALTH_REPORT = "DeployedApplicationNewHealthReport" - DEPLOYED_APPLICATION_HEALTH_REPORT_EXPIRED = "DeployedApplicationHealthReportExpired" - APPLICATION_PROCESS_EXITED = "ApplicationProcessExited" - APPLICATION_CONTAINER_INSTANCE_EXITED = "ApplicationContainerInstanceExited" - NODE_ABORTED = "NodeAborted" - NODE_ADDED_TO_CLUSTER = "NodeAddedToCluster" - NODE_CLOSED = "NodeClosed" - NODE_DEACTIVATE_COMPLETED = "NodeDeactivateCompleted" - NODE_DEACTIVATE_STARTED = "NodeDeactivateStarted" - NODE_DOWN = "NodeDown" - NODE_NEW_HEALTH_REPORT = "NodeNewHealthReport" - NODE_HEALTH_REPORT_EXPIRED = "NodeHealthReportExpired" - NODE_OPEN_SUCCEEDED = "NodeOpenSucceeded" - NODE_OPEN_FAILED = "NodeOpenFailed" - NODE_REMOVED_FROM_CLUSTER = "NodeRemovedFromCluster" - NODE_UP = "NodeUp" - PARTITION_NEW_HEALTH_REPORT = "PartitionNewHealthReport" - PARTITION_HEALTH_REPORT_EXPIRED = "PartitionHealthReportExpired" - PARTITION_RECONFIGURED = "PartitionReconfigured" - PARTITION_PRIMARY_MOVE_ANALYSIS = "PartitionPrimaryMoveAnalysis" - SERVICE_CREATED = "ServiceCreated" - 
SERVICE_DELETED = "ServiceDeleted" - SERVICE_NEW_HEALTH_REPORT = "ServiceNewHealthReport" - SERVICE_HEALTH_REPORT_EXPIRED = "ServiceHealthReportExpired" - DEPLOYED_SERVICE_PACKAGE_NEW_HEALTH_REPORT = "DeployedServicePackageNewHealthReport" - DEPLOYED_SERVICE_PACKAGE_HEALTH_REPORT_EXPIRED = "DeployedServicePackageHealthReportExpired" - STATEFUL_REPLICA_NEW_HEALTH_REPORT = "StatefulReplicaNewHealthReport" - STATEFUL_REPLICA_HEALTH_REPORT_EXPIRED = "StatefulReplicaHealthReportExpired" - STATELESS_REPLICA_NEW_HEALTH_REPORT = "StatelessReplicaNewHealthReport" - STATELESS_REPLICA_HEALTH_REPORT_EXPIRED = "StatelessReplicaHealthReportExpired" - CLUSTER_NEW_HEALTH_REPORT = "ClusterNewHealthReport" - CLUSTER_HEALTH_REPORT_EXPIRED = "ClusterHealthReportExpired" - CLUSTER_UPGRADE_COMPLETED = "ClusterUpgradeCompleted" - CLUSTER_UPGRADE_DOMAIN_COMPLETED = "ClusterUpgradeDomainCompleted" - CLUSTER_UPGRADE_ROLLBACK_COMPLETED = "ClusterUpgradeRollbackCompleted" - CLUSTER_UPGRADE_ROLLBACK_STARTED = "ClusterUpgradeRollbackStarted" - CLUSTER_UPGRADE_STARTED = "ClusterUpgradeStarted" - CHAOS_STOPPED = "ChaosStopped" - CHAOS_STARTED = "ChaosStarted" - CHAOS_CODE_PACKAGE_RESTART_SCHEDULED = "ChaosCodePackageRestartScheduled" - CHAOS_REPLICA_REMOVAL_SCHEDULED = "ChaosReplicaRemovalScheduled" - CHAOS_PARTITION_SECONDARY_MOVE_SCHEDULED = "ChaosPartitionSecondaryMoveScheduled" - CHAOS_PARTITION_PRIMARY_MOVE_SCHEDULED = "ChaosPartitionPrimaryMoveScheduled" - CHAOS_REPLICA_RESTART_SCHEDULED = "ChaosReplicaRestartScheduled" - CHAOS_NODE_RESTART_SCHEDULED = "ChaosNodeRestartScheduled" - -class FabricReplicaStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the status of the replica. - """ - - #: Indicates that the read or write operation access status is not valid. This value is not - #: returned to the caller. - INVALID = "Invalid" - #: Indicates that the replica is down. - DOWN = "Down" - #: Indicates that the replica is up. 
- UP = "Up" - -class FailureAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The compensating action to perform when a Monitored upgrade encounters monitoring policy or - health policy violations. - Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will start - rolling back automatically. - Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode. - """ - - #: Indicates the failure action is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade will start rolling back automatically. The value is 1. - ROLLBACK = "Rollback" - #: The upgrade will switch to UnmonitoredManual upgrade mode. The value is 2. - MANUAL = "Manual" - -class FailureReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The cause of an upgrade failure that resulted in FailureAction being executed. - """ - - #: Indicates the reason is invalid or unknown. All Service Fabric enumerations have the invalid - #: type. The value is zero. - NONE = "None" - #: There was an external request to roll back the upgrade. The value is 1. - INTERRUPTED = "Interrupted" - #: The upgrade failed due to health policy violations. The value is 2. - HEALTH_CHECK = "HealthCheck" - #: An upgrade domain took longer than the allowed upgrade domain timeout to process. The value is - #: 3. - UPGRADE_DOMAIN_TIMEOUT = "UpgradeDomainTimeout" - #: The overall upgrade took longer than the allowed upgrade timeout to process. The value is 4. - OVERALL_UPGRADE_TIMEOUT = "OverallUpgradeTimeout" - -class HeaderMatchType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """how to match header value - """ - - EXACT = "exact" - -class HealthEvaluationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The health manager in the cluster performs health evaluations in determining the aggregated - health state of an entity. 
This enumeration provides information on the kind of evaluation that - was performed. Following are the possible values. - """ - - #: Indicates that the health evaluation is invalid. The value is zero. - INVALID = "Invalid" - #: Indicates that the health evaluation is for a health event. The value is 1. - EVENT = "Event" - #: Indicates that the health evaluation is for the replicas of a partition. The value is 2. - REPLICAS = "Replicas" - #: Indicates that the health evaluation is for the partitions of a service. The value is 3. - PARTITIONS = "Partitions" - #: Indicates that the health evaluation is for the deployed service packages of a deployed - #: application. The value is 4. - DEPLOYED_SERVICE_PACKAGES = "DeployedServicePackages" - #: Indicates that the health evaluation is for the deployed applications of an application. The - #: value is 5. - DEPLOYED_APPLICATIONS = "DeployedApplications" - #: Indicates that the health evaluation is for services of an application. The value is 6. - SERVICES = "Services" - #: Indicates that the health evaluation is for the cluster nodes. The value is 7. - NODES = "Nodes" - #: Indicates that the health evaluation is for the cluster applications. The value is 8. - APPLICATIONS = "Applications" - #: Indicates that the health evaluation is for the system application. The value is 9. - SYSTEM_APPLICATION = "SystemApplication" - #: Indicates that the health evaluation is for the deployed applications of an application in an - #: upgrade domain. The value is 10. - UPGRADE_DOMAIN_DEPLOYED_APPLICATIONS = "UpgradeDomainDeployedApplications" - #: Indicates that the health evaluation is for the cluster nodes in an upgrade domain. The value - #: is 11. - UPGRADE_DOMAIN_NODES = "UpgradeDomainNodes" - #: Indicates that the health evaluation is for a replica. The value is 13. - REPLICA = "Replica" - #: Indicates that the health evaluation is for a partition. The value is 14. 
- PARTITION = "Partition" - #: Indicates that the health evaluation is for a deployed service package. The value is 16. - DEPLOYED_SERVICE_PACKAGE = "DeployedServicePackage" - #: Indicates that the health evaluation is for a deployed application. The value is 17. - DEPLOYED_APPLICATION = "DeployedApplication" - #: Indicates that the health evaluation is for a service. The value is 15. - SERVICE = "Service" - #: Indicates that the health evaluation is for a node. The value is 12. - NODE = "Node" - #: Indicates that the health evaluation is for an application. The value is 18. - APPLICATION = "Application" - #: Indicates that the health evaluation is for the delta of unhealthy cluster nodes. The value is - #: 19. - DELTA_NODES_CHECK = "DeltaNodesCheck" - #: Indicates that the health evaluation is for the delta of unhealthy upgrade domain cluster - #: nodes. The value is 20. - UPGRADE_DOMAIN_DELTA_NODES_CHECK = "UpgradeDomainDeltaNodesCheck" - #: – Indicates that the health evaluation is for applications of an application type. The value is - #: 21. - APPLICATION_TYPE_APPLICATIONS = "ApplicationTypeApplications" - #: – Indicates that the health evaluation is for nodes of a node type. The value is 22. - NODE_TYPE_NODES = "NodeTypeNodes" - -class HealthState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The health state of a Service Fabric entity such as Cluster, Node, Application, Service, - Partition, Replica etc. - """ - - #: Indicates an invalid health state. All Service Fabric enumerations have the invalid type. The - #: value is zero. - INVALID = "Invalid" - #: Indicates the health state is okay. The value is 1. - OK = "Ok" - #: Indicates the health state is at a warning level. The value is 2. - WARNING = "Warning" - #: Indicates the health state is at an error level. Error health state should be investigated, as - #: they can impact the correct functionality of the cluster. The value is 3. - ERROR = "Error" - #: Indicates an unknown health status. 
The value is 65535. - UNKNOWN = "Unknown" - -class HostIsolationMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the isolation mode of main entry point of a code package when it's host type is - ContainerHost. This is specified as part of container host policies in application manifest - while importing service manifest. - """ - - #: Indicates the isolation mode is not applicable for given HostType. The value is 0. - NONE = "None" - #: This is the default isolation mode for a ContainerHost. The value is 1. - PROCESS = "Process" - #: Indicates the ContainerHost is a Hyper-V container. This applies to only Windows containers. - #: The value is 2. - HYPER_V = "HyperV" - -class HostOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """choices for server host - """ - - #: host: http://localhost:19080/. - HTTP_LOCALHOST19080_ = "http://localhost:19080/" - #: host: https://localhost:19080/. - HTTPS_LOCALHOST19080_ = "https://localhost:19080/" - -class HostType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the type of host for main entry point of a code package as specified in service - manifest. - """ - - #: Indicates the type of host is not known or invalid. The value is 0. - INVALID = "Invalid" - #: Indicates the host is an executable. The value is 1. - EXE_HOST = "ExeHost" - #: Indicates the host is a container. The value is 2. - CONTAINER_HOST = "ContainerHost" - -class ImageRegistryPasswordType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of the image registry password being given in password - """ - - #: The image registry password in clear text, will not be processed in any way and used directly. - CLEAR_TEXT = "ClearText" - #: The URI to a KeyVault secret version, will be resolved using the application's managed identity - #: (this type is only valid if the app was assigned a managed identity) before getting used. 
- KEY_VAULT_REFERENCE = "KeyVaultReference" - #: The reference to a SecretValue resource, will be resolved before getting used. - SECRET_VALUE_REFERENCE = "SecretValueReference" - -class ImpactLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The level of impact expected. - """ - - INVALID = "Invalid" - NONE = "None" - RESTART = "Restart" - REMOVE_DATA = "RemoveData" - REMOVE_NODE = "RemoveNode" - -class ManagedIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of managed identity to be used to connect to Azure Blob Store via Managed Identity. - """ - - #: Indicates an invalid managed identity type. All Service Fabric enumerations have the invalid - #: type. - INVALID = "Invalid" - #: Indicates VMSS managed identity should be used to connect to Azure blob store. - VMSS = "VMSS" - #: Indicates cluster managed identity should be used to connect to Azure blob store. - CLUSTER = "Cluster" - -class MoveCost(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the move cost for the service. - """ - - #: Zero move cost. This value is zero. - ZERO = "Zero" - #: Specifies the move cost of the service as Low. The value is 1. - LOW = "Low" - #: Specifies the move cost of the service as Medium. The value is 2. - MEDIUM = "Medium" - #: Specifies the move cost of the service as High. The value is 3. - HIGH = "High" - #: Specifies the move cost of the service as VeryHigh. The value is 4. - VERY_HIGH = "VeryHigh" - -class NetworkKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of a Service Fabric container network. - """ - - #: Indicates a container network local to a single Service Fabric cluster. The value is 1. - LOCAL = "Local" - -class NodeDeactivationIntent(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The intent or the reason for deactivating the node. Following are the possible values for it. - """ - - #: Indicates the node deactivation intent is invalid. 
All Service Fabric enumerations have the - #: invalid type. The value is zero. This value is not used. - INVALID = "Invalid" - #: Indicates that the node should be paused. The value is 1. - PAUSE = "Pause" - #: Indicates that the intent is for the node to be restarted after a short period of time. Service - #: Fabric does not restart the node, this action is done outside of Service Fabric. The value is - #: 2. - RESTART = "Restart" - #: Indicates that the intent is to reimage the node. Service Fabric does not reimage the node, - #: this action is done outside of Service Fabric. The value is 3. - REMOVE_DATA = "RemoveData" - #: Indicates that the node is being decommissioned and is not expected to return. Service Fabric - #: does not decommission the node, this action is done outside of Service Fabric. The value is 4. - REMOVE_NODE = "RemoveNode" - -class NodeDeactivationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of node deactivation operation. Following are the possible values. - """ - - #: No status is associated with the task. The value is zero. - NONE = "None" - #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe - #: to proceed to ensure availability of the service and reliability of the state. This value - #: indicates that one or more safety checks are in progress. The value is 1. - SAFETY_CHECK_IN_PROGRESS = "SafetyCheckInProgress" - #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe - #: to proceed to ensure availability of the service and reliability of the state. This value - #: indicates that all safety checks have been completed. The value is 2. - SAFETY_CHECK_COMPLETE = "SafetyCheckComplete" - #: The task is completed. The value is 3. - COMPLETED = "Completed" - -class NodeDeactivationTaskType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of the task that performed the node deactivation. 
Following are the possible values. - """ - - #: Indicates the node deactivation task type is invalid. All Service Fabric enumerations have the - #: invalid type. The value is zero. This value is not used. - INVALID = "Invalid" - #: Specifies the task created by Infrastructure hosting the nodes. The value is 1. - INFRASTRUCTURE = "Infrastructure" - #: Specifies the task that was created by the Repair Manager service. The value is 2. - REPAIR = "Repair" - #: Specifies that the task was created by using the public API. The value is 3. - CLIENT = "Client" - -class NodeStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the node. - """ - - #: Indicates the node status is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: Indicates the node is up. The value is 1. - UP = "Up" - #: Indicates the node is down. The value is 2. - DOWN = "Down" - #: Indicates the node is in process of being enabled. The value is 3. - ENABLING = "Enabling" - #: Indicates the node is in the process of being disabled. The value is 4. - DISABLING = "Disabling" - #: Indicates the node is disabled. The value is 5. - DISABLED = "Disabled" - #: Indicates the node is unknown. A node would be in Unknown state if Service Fabric does not have - #: authoritative information about that node. This can happen if the system learns about a node at - #: runtime.The value is 6. - UNKNOWN = "Unknown" - #: Indicates the node is removed. A node would be in Removed state if NodeStateRemoved API has - #: been called for this node. In other words, Service Fabric has been informed that the persisted - #: state on the node has been permanently lost. The value is 7. - REMOVED = "Removed" - -class NodeStatusFilter(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - #: This filter value will match all of the nodes excepts the ones with status as Unknown or - #: Removed. 
- DEFAULT = "default" - #: This filter value will match all of the nodes. - ALL = "all" - #: This filter value will match nodes that are Up. - UP = "up" - #: This filter value will match nodes that are Down. - DOWN = "down" - #: This filter value will match nodes that are in the process of being enabled with status as - #: Enabling. - ENABLING = "enabling" - #: This filter value will match nodes that are in the process of being disabled with status as - #: Disabling. - DISABLING = "disabling" - #: This filter value will match nodes that are Disabled. - DISABLED = "disabled" - #: This filter value will match nodes whose status is Unknown. A node would be in Unknown state if - #: Service Fabric does not have authoritative information about that node. This can happen if the - #: system learns about a node at runtime. - UNKNOWN = "unknown" - #: This filter value will match nodes whose status is Removed. These are the nodes that are - #: removed from the cluster using the RemoveNodeState API. - REMOVED = "removed" - -class NodeTransitionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - #: Reserved. Do not pass into API. - INVALID = "Invalid" - #: Transition a stopped node to up. - START = "Start" - #: Transition an up node to stopped. - STOP = "Stop" - -class NodeUpgradePhase(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The state of the upgrading node. - """ - - #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade has not started yet due to pending safety checks. The value is 1. - PRE_UPGRADE_SAFETY_CHECK = "PreUpgradeSafetyCheck" - #: The upgrade is in progress. The value is 2. - UPGRADING = "Upgrading" - #: The upgrade has completed and post upgrade safety checks are being performed. The value is 3. 
- POST_UPGRADE_SAFETY_CHECK = "PostUpgradeSafetyCheck" - -class OperatingSystemType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The operation system required by the code in service. - """ - - #: The required operating system is Linux. - LINUX = "Linux" - #: The required operating system is Windows. - WINDOWS = "Windows" - -class OperationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The state of the operation. - """ - - #: The operation state is invalid. - INVALID = "Invalid" - #: The operation is in progress. - RUNNING = "Running" - #: The operation is rolling back internal system state because it encountered a fatal error or was - #: cancelled by the user. "RollingBack" does not refer to user state. For example, if - #: CancelOperation is called on a command of type PartitionDataLoss, state of "RollingBack" does - #: not mean service data is being restored (assuming the command has progressed far enough to - #: cause data loss). It means the system is rolling back/cleaning up internal system state - #: associated with the command. - ROLLING_BACK = "RollingBack" - #: The operation has completed successfully and is no longer running. - COMPLETED = "Completed" - #: The operation has failed and is no longer running. - FAULTED = "Faulted" - #: The operation was cancelled by the user using CancelOperation, and is no longer running. - CANCELLED = "Cancelled" - #: The operation was cancelled by the user using CancelOperation, with the force parameter set to - #: true. It is no longer running. Refer to CancelOperation for more details. - FORCE_CANCELLED = "ForceCancelled" - -class OperationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of the operation. - """ - - #: The operation state is invalid. - INVALID = "Invalid" - #: An operation started using the StartDataLoss API. - PARTITION_DATA_LOSS = "PartitionDataLoss" - #: An operation started using the StartQuorumLoss API. 
- PARTITION_QUORUM_LOSS = "PartitionQuorumLoss" - #: An operation started using the StartPartitionRestart API. - PARTITION_RESTART = "PartitionRestart" - #: An operation started using the StartNodeTransition API. - NODE_TRANSITION = "NodeTransition" - -class Ordering(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Defines the order. - """ - - #: Descending sort order. - DESC = "Desc" - #: Ascending sort order. - ASC = "Asc" - -class PackageSharingPolicyScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Represents the scope for PackageSharingPolicy. This is specified during - DeployServicePackageToNode operation. - """ - - #: No package sharing policy scope. The value is 0. - NONE = "None" - #: Share all code, config and data packages from corresponding service manifest. The value is 1. - ALL = "All" - #: Share all code packages from corresponding service manifest. The value is 2. - CODE = "Code" - #: Share all config packages from corresponding service manifest. The value is 3. - CONFIG = "Config" - #: Share all data packages from corresponding service manifest. The value is 4. - DATA = "Data" - -class PartitionAccessStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the access status of the partition. - """ - - #: Indicates that the read or write operation access status is not valid. This value is not - #: returned to the caller. - INVALID = "Invalid" - #: Indicates that the read or write operation access is granted and the operation is allowed. - GRANTED = "Granted" - #: Indicates that the client should try again later, because a reconfiguration is in progress. - RECONFIGURATION_PENDING = "ReconfigurationPending" - #: Indicates that this client request was received by a replica that is not a Primary replica. - NOT_PRIMARY = "NotPrimary" - #: Indicates that no write quorum is available and, therefore, no write operation can be accepted. 
- NO_WRITE_QUORUM = "NoWriteQuorum" - -class PartitionScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the ways that a service can be partitioned. - """ - - #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: Indicates that the partition is based on string names, and is a - #: SingletonPartitionSchemeDescription object, The value is 1. - SINGLETON = "Singleton" - #: Indicates that the partition is based on Int64 key ranges, and is a - #: UniformInt64RangePartitionSchemeDescription object. The value is 2. - UNIFORM_INT64_RANGE = "UniformInt64Range" - #: Indicates that the partition is based on string names, and is a NamedPartitionSchemeDescription - #: object. The value is 3. - NAMED = "Named" - -class PathMatchType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """how to match value in the Uri - """ - - PREFIX = "prefix" - -class PropertyBatchInfoKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of property batch info, determined by the results of a property batch. The following - are the possible values. - """ - - #: Indicates the property batch info is invalid. All Service Fabric enumerations have the invalid - #: type. - INVALID = "Invalid" - #: The property batch succeeded. - SUCCESSFUL = "Successful" - #: The property batch failed. - FAILED = "Failed" - -class PropertyBatchOperationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of property batch operation, determined by the operation to be performed. The - following are the possible values. - """ - - #: Indicates the property operation is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: The operation will create or edit a property. The value is 1. - PUT = "Put" - #: The operation will get a property. The value is 2. 
- GET = "Get" - #: The operation will check that a property exists or doesn't exists, depending on the provided - #: value. The value is 3. - CHECK_EXISTS = "CheckExists" - #: The operation will ensure that the sequence number is equal to the provided value. The value is - #: 4. - CHECK_SEQUENCE = "CheckSequence" - #: The operation will delete a property. The value is 5. - DELETE = "Delete" - #: The operation will ensure that the value of a property is equal to the provided value. The - #: value is 7. - CHECK_VALUE = "CheckValue" - -class PropertyValueKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of property, determined by the type of data. Following are the possible values. - """ - - #: Indicates the property is invalid. All Service Fabric enumerations have the invalid type. The - #: value is zero. - INVALID = "Invalid" - #: The data inside the property is a binary blob. The value is 1. - BINARY = "Binary" - #: The data inside the property is an int64. The value is 2. - INT64 = "Int64" - #: The data inside the property is a double. The value is 3. - DOUBLE = "Double" - #: The data inside the property is a string. The value is 4. - STRING = "String" - #: The data inside the property is a guid. The value is 5. - GUID = "Guid" - -class ProvisionApplicationTypeKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of application type registration or provision requested. The application package can - be registered or provisioned either from the image store or from an external store. Following - are the kinds of the application type provision. - """ - - #: Indicates that the provision kind is invalid. This value is default and should not be used. The - #: value is zero. - INVALID = "Invalid" - #: Indicates that the provision is for a package that was previously uploaded to the image store. - #: The value is 1. 
- IMAGE_STORE_PATH = "ImageStorePath" - #: Indicates that the provision is for an application package that was previously uploaded to an - #: external store. The application package ends with the extension *.sfpkg. The value is 2. - EXTERNAL_STORE = "ExternalStore" - -class QuorumLossMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - #: Reserved. Do not pass into API. - INVALID = "Invalid" - #: Partial Quorum loss mode : Minimum number of replicas for a partition will be down that will - #: cause a quorum loss. - QUORUM_REPLICAS = "QuorumReplicas" - ALL_REPLICAS = "AllReplicas" - -class ReconfigurationPhase(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The reconfiguration phase of a replica of a stateful service. - """ - - #: Indicates the invalid reconfiguration phase. - UNKNOWN = "Unknown" - #: Specifies that there is no reconfiguration in progress. - NONE = "None" - #: Refers to the phase where the reconfiguration is transferring data from the previous primary to - #: the new primary. - PHASE0 = "Phase0" - #: Refers to the phase where the reconfiguration is querying the replica set for the progress. - PHASE1 = "Phase1" - #: Refers to the phase where the reconfiguration is ensuring that data from the current primary is - #: present in a majority of the replica set. - PHASE2 = "Phase2" - #: This phase is for internal use only. - PHASE3 = "Phase3" - #: This phase is for internal use only. - PHASE4 = "Phase4" - #: This phase is for internal use only. - ABORT_PHASE_ZERO = "AbortPhaseZero" - -class ReconfigurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of reconfiguration for replica of a stateful service. - """ - - #: Indicates the invalid reconfiguration type. - UNKNOWN = "Unknown" - #: Specifies that the primary replica is being swapped with a different replica. - SWAP_PRIMARY = "SwapPrimary" - #: Reconfiguration triggered in response to a primary going down. 
This could be due to many - #: reasons such as primary replica crashing etc. - FAILOVER = "Failover" - #: Reconfigurations where the primary replica is not changing. - OTHER = "Other" - -class RepairImpactKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the kind of the impact. This type supports the Service Fabric platform; it is not - meant to be used directly from your code.' - """ - - #: The repair impact is not valid or is of an unknown type. - INVALID = "Invalid" - #: The repair impact affects a set of Service Fabric nodes. - NODE = "Node" - -class RepairTargetKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the kind of the repair target. This type supports the Service Fabric platform; it is - not meant to be used directly from your code.' - """ - - #: The repair target is not valid or is of an unknown type. - INVALID = "Invalid" - #: The repair target is a set of Service Fabric nodes. - NODE = "Node" - -class RepairTaskHealthCheckState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the workflow state of a repair task's health check. This type supports the Service - Fabric platform; it is not meant to be used directly from your code. - """ - - #: Indicates that the health check has not started. - NOT_STARTED = "NotStarted" - #: Indicates that the health check is in progress. - IN_PROGRESS = "InProgress" - #: Indicates that the health check succeeded. - SUCCEEDED = "Succeeded" - #: Indicates that the health check was skipped. - SKIPPED = "Skipped" - #: Indicates that the health check timed out. - TIMED_OUT = "TimedOut" - -class ReplicaHealthReportServiceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - #: Does not use Service Fabric to make its state highly available or reliable. The value is 1. - STATELESS = "Stateless" - #: Uses Service Fabric to make its state or part of its state highly available and reliable. The - #: value is 2. 
- STATEFUL = "Stateful" - -class ReplicaKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The role of a replica of a stateful service. - """ - - #: Represents an invalid replica kind. The value is zero. - INVALID = "Invalid" - #: Represents a key value store replica. The value is 1. - KEY_VALUE_STORE = "KeyValueStore" - -class ReplicaRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The role of a replica of a stateful service. - """ - - #: Indicates the initial role that a replica is created in. The value is zero. - UNKNOWN = "Unknown" - #: Specifies that the replica has no responsibility in regard to the replica set. The value is 1. - NONE = "None" - #: Refers to the replica in the set on which all read and write operations are complete in order - #: to enforce strong consistency semantics. Read operations are handled directly by the Primary - #: replica, while write operations must be acknowledged by a quorum of the replicas in the replica - #: set. There can only be one Primary replica in a replica set at a time. The value is 2. - PRIMARY = "Primary" - #: Refers to a replica in the set that receives a state transfer from the Primary replica to - #: prepare for becoming an active Secondary replica. There can be multiple Idle Secondary replicas - #: in a replica set at a time. Idle Secondary replicas do not count as a part of a write quorum. - #: The value is 3. - IDLE_SECONDARY = "IdleSecondary" - #: Refers to a replica in the set that receives state updates from the Primary replica, applies - #: them, and sends acknowledgements back. Secondary replicas must participate in the write quorum - #: for a replica set. There can be multiple active Secondary replicas in a replica set at a time. - #: The number of active Secondary replicas is configurable that the reliability subsystem should - #: maintain. The value is 4. 
- ACTIVE_SECONDARY = "ActiveSecondary" - -class ReplicaStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of a replica of a service. - """ - - #: Indicates the replica status is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The replica is being built. This means that a primary replica is seeding this replica. The - #: value is 1. - IN_BUILD = "InBuild" - #: The replica is in standby. The value is 2. - STANDBY = "Standby" - #: The replica is ready. The value is 3. - READY = "Ready" - #: The replica is down. The value is 4. - DOWN = "Down" - #: Replica is dropped. This means that the replica has been removed from the replica set. If it is - #: persisted, its state has been deleted. The value is 5. - DROPPED = "Dropped" - -class ReplicatorOperationName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the operation currently being executed by the Replicator. - """ - - #: Default value if the replicator is not yet ready. - INVALID = "Invalid" - #: Replicator is not running any operation from Service Fabric perspective. - NONE = "None" - #: Replicator is opening. - OPEN = "Open" - #: Replicator is in the process of changing its role. - CHANGE_ROLE = "ChangeRole" - #: Due to a change in the replica set, replicator is being updated with its Epoch. - UPDATE_EPOCH = "UpdateEpoch" - #: Replicator is closing. - CLOSE = "Close" - #: Replicator is being aborted. - ABORT = "Abort" - #: Replicator is handling the data loss condition, where the user service may potentially be - #: recovering state from an external source. - ON_DATA_LOSS = "OnDataLoss" - #: Replicator is waiting for a quorum of replicas to be caught up to the latest state. - WAIT_FOR_CATCHUP = "WaitForCatchup" - #: Replicator is in the process of building one or more replicas. - BUILD = "Build" - -class ResourceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Status of the resource. 
- """ - - #: Indicates the resource status is unknown. The value is zero. - UNKNOWN = "Unknown" - #: Indicates the resource is ready. The value is 1. - READY = "Ready" - #: Indicates the resource is upgrading. The value is 2. - UPGRADING = "Upgrading" - #: Indicates the resource is being created. The value is 3. - CREATING = "Creating" - #: Indicates the resource is being deleted. The value is 4. - DELETING = "Deleting" - #: Indicates the resource is not functional due to persistent failures. See statusDetails property - #: for more details. The value is 5. - FAILED = "Failed" - -class RestartPartitionMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - #: Reserved. Do not pass into API. - INVALID = "Invalid" - #: All replicas or instances in the partition are restarted at once. - ALL_REPLICAS_OR_INSTANCES = "AllReplicasOrInstances" - #: Only the secondary replicas are restarted. - ONLY_ACTIVE_SECONDARIES = "OnlyActiveSecondaries" - -class RestartPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the restart policy for RunToCompletionExecutionPolicy - """ - - #: Service will be restarted when it encounters a failure. - ON_FAILURE = "OnFailure" - #: Service will never be restarted. If the service encounters a failure, it will move to Failed - #: state. - NEVER = "Never" - -class RestoreState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Represents the current state of the partition restore operation. - """ - - #: Indicates an invalid restore state. All Service Fabric enumerations have the invalid type. - INVALID = "Invalid" - #: Operation has been validated and accepted. Restore is yet to be triggered. - ACCEPTED = "Accepted" - #: Restore operation has been triggered and is under process. - RESTORE_IN_PROGRESS = "RestoreInProgress" - #: Operation completed with success. - SUCCESS = "Success" - #: Operation completed with failure. - FAILURE = "Failure" - #: Operation timed out. 
- TIMEOUT = "Timeout" - -class ResultStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """A value describing the overall result of the repair task execution. Must be specified in the - Restoring and later states, and is immutable once set. - """ - - #: Indicates that the repair task result is invalid. All Service Fabric enumerations have the - #: invalid value. - INVALID = "Invalid" - #: Indicates that the repair task completed execution successfully. - SUCCEEDED = "Succeeded" - #: Indicates that the repair task was cancelled prior to execution. - CANCELLED = "Cancelled" - #: Indicates that execution of the repair task was interrupted by a cancellation request after - #: some work had already been performed. - INTERRUPTED = "Interrupted" - #: Indicates that there was a failure during execution of the repair task. Some work may have been - #: performed. - FAILED = "Failed" - #: Indicates that the repair task result is not yet available, because the repair task has not - #: finished executing. - PENDING = "Pending" - -class RetentionPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of retention policy. Currently only "Basic" retention policy is supported. - """ - - #: Indicates a basic retention policy type. - BASIC = "Basic" - #: Indicates an invalid retention policy type. - INVALID = "Invalid" - -class RollingUpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The mode used to monitor health during a rolling upgrade. The values are UnmonitoredAuto, - UnmonitoredManual, and Monitored. - """ - - #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade will proceed automatically without performing any health monitoring. The value is - #: 1. 
- UNMONITORED_AUTO = "UnmonitoredAuto" - #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually - #: monitor health before proceeding. The value is 2. - UNMONITORED_MANUAL = "UnmonitoredManual" - #: The upgrade will stop after completing each upgrade domain and automatically monitor health - #: before proceeding. The value is 3. - MONITORED = "Monitored" - -class SafetyCheckKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of safety check performed by service fabric before continuing with the operations. - These checks ensure the availability of the service and the reliability of the state. Following - are the kinds of safety checks. - """ - - #: Indicates that the upgrade safety check kind is invalid. All Service Fabric enumerations have - #: the invalid type. The value is zero. - INVALID = "Invalid" - #: Indicates that if we bring down the node then this will result in global seed node quorum loss. - #: The value is 1. - ENSURE_SEED_NODE_QUORUM = "EnsureSeedNodeQuorum" - #: Indicates that there is some partition for which if we bring down the replica on the node, it - #: will result in quorum loss for that partition. The value is 2. - ENSURE_PARTITION_QUORUM = "EnsurePartitionQuorum" - #: Indicates that there is some replica on the node that was moved out of this node due to - #: upgrade. Service Fabric is now waiting for the primary to be moved back to this node. The value - #: is 3. - WAIT_FOR_PRIMARY_PLACEMENT = "WaitForPrimaryPlacement" - #: Indicates that Service Fabric is waiting for a primary replica to be moved out of the node - #: before starting upgrade on that node. The value is 4. - WAIT_FOR_PRIMARY_SWAP = "WaitForPrimarySwap" - #: Indicates that there is some replica on the node that is involved in a reconfiguration. Service - #: Fabric is waiting for the reconfiguration to be complete before staring upgrade on that node. - #: The value is 5. 
- WAIT_FOR_RECONFIGURATION = "WaitForReconfiguration" - #: Indicates that there is either a replica on the node that is going through copy, or there is a - #: primary replica on the node that is copying data to some other replica. In both cases, bringing - #: down the replica on the node due to upgrade will abort the copy. The value is 6. - WAIT_FOR_INBUILD_REPLICA = "WaitForInbuildReplica" - #: Indicates that there is either a stateless service partition on the node having exactly one - #: instance, or there is a primary replica on the node for which the partition is quorum loss. In - #: both cases, bringing down the replicas due to upgrade will result in loss of availability. The - #: value is 7. - ENSURE_AVAILABILITY = "EnsureAvailability" - -class ScalingMechanismKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the ways that a service can be scaled. - """ - - #: Indicates the scaling mechanism is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates a mechanism for scaling where new instances are added or removed from a partition. - #: The value is 1. - PARTITION_INSTANCE_COUNT = "PartitionInstanceCount" - #: Indicates a mechanism for scaling where new named partitions are added or removed from a - #: service. The value is 2. - ADD_REMOVE_INCREMENTAL_NAMED_PARTITION = "AddRemoveIncrementalNamedPartition" - -class ScalingTriggerKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumerates the ways that a service can be scaled. - """ - - #: Indicates the scaling trigger is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates a trigger where scaling decisions are made based on average load of a partition. The - #: value is 1. - AVERAGE_PARTITION_LOAD = "AveragePartitionLoad" - #: Indicates a trigger where scaling decisions are made based on average load of a service. 
The - #: value is 2. - AVERAGE_SERVICE_LOAD = "AverageServiceLoad" - -class Scheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Scheme for the http probe. Can be Http or Https. - """ - - #: Indicates that the probe is http. - HTTP = "http" - #: Indicates that the probe is https. No cert validation. - HTTPS = "https" - -class SecretKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the kind of secret. - """ - - #: A simple secret resource whose plaintext value is provided by the user. - INLINED_VALUE = "inlinedValue" - #: A secret resource that references a specific version of a secret stored in Azure Key Vault; the - #: expected value is a versioned KeyVault URI corresponding to the version of the secret being - #: referenced. - KEY_VAULT_VERSIONED_REFERENCE = "keyVaultVersionedReference" - -class ServiceCorrelationScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The service correlation scheme. - """ - - #: An invalid correlation scheme. Cannot be used. The value is zero. - INVALID = "Invalid" - #: Indicates that this service has an affinity relationship with another service. Provided for - #: backwards compatibility, consider preferring the Aligned or NonAlignedAffinity options. The - #: value is 1. - AFFINITY = "Affinity" - #: Aligned affinity ensures that the primaries of the partitions of the affinitized services are - #: collocated on the same nodes. This is the default and is the same as selecting the Affinity - #: scheme. The value is 2. - ALIGNED_AFFINITY = "AlignedAffinity" - #: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same - #: nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will - #: be collocated. The value is 3. - NON_ALIGNED_AFFINITY = "NonAlignedAffinity" - -class ServiceEndpointRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The role of the replica where the endpoint is reported. 
- """ - - #: Indicates the service endpoint role is invalid. All Service Fabric enumerations have the - #: invalid type. The value is zero. - INVALID = "Invalid" - #: Indicates that the service endpoint is of a stateless service. The value is 1. - STATELESS = "Stateless" - #: Indicates that the service endpoint is of a primary replica of a stateful service. The value is - #: 2. - STATEFUL_PRIMARY = "StatefulPrimary" - #: Indicates that the service endpoint is of a secondary replica of a stateful service. The value - #: is 3. - STATEFUL_SECONDARY = "StatefulSecondary" - -class ServiceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of service (Stateless or Stateful). - """ - - #: Indicates the service kind is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: Does not use Service Fabric to make its state highly available or reliable. The value is 1. - STATELESS = "Stateless" - #: Uses Service Fabric to make its state or part of its state highly available and reliable. The - #: value is 2. - STATEFUL = "Stateful" - -class ServiceLoadMetricWeight(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Determines the metric weight relative to the other metrics that are configured for this - service. During runtime, if two metrics end up in conflict, the Cluster Resource Manager - prefers the metric with the higher weight. - """ - - #: Disables resource balancing for this metric. This value is zero. - ZERO = "Zero" - #: Specifies the metric weight of the service load as Low. The value is 1. - LOW = "Low" - #: Specifies the metric weight of the service load as Medium. The value is 2. - MEDIUM = "Medium" - #: Specifies the metric weight of the service load as High. The value is 3. 
- HIGH = "High" - -class ServiceOperationName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Specifies the current active life-cycle operation on a stateful service replica or stateless - service instance. - """ - - #: Reserved for future use. - UNKNOWN = "Unknown" - #: The service replica or instance is not going through any life-cycle changes. - NONE = "None" - #: The service replica or instance is being opened. - OPEN = "Open" - #: The service replica is changing roles. - CHANGE_ROLE = "ChangeRole" - #: The service replica or instance is being closed. - CLOSE = "Close" - #: The service replica or instance is being aborted. - ABORT = "Abort" - -class ServicePackageActivationMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The activation mode of service package to be used for a Service Fabric service. This is - specified at the time of creating the Service. - """ - - #: This is the default activation mode. With this activation mode, replicas or instances from - #: different partition(s) of service, on a given node, will share same activation of service - #: package on a node. The value is zero. - SHARED_PROCESS = "SharedProcess" - #: With this activation mode, each replica or instance of service, on a given node, will have its - #: own dedicated activation of service package on a node. The value is 1. - EXCLUSIVE_PROCESS = "ExclusiveProcess" - -class ServicePartitionKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of partitioning scheme used to partition the service. - """ - - #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: Indicates that there is only one partition, and SingletonPartitionSchemeDescription was - #: specified while creating the service. The value is 1. 
- SINGLETON = "Singleton" - #: Indicates that the partition is based on Int64 key ranges, and - #: UniformInt64RangePartitionSchemeDescription was specified while creating the service. The value - #: is 2. - INT64_RANGE = "Int64Range" - #: Indicates that the partition is based on string names, and NamedPartitionInformation was - #: specified while creating the service. The value is 3. - NAMED = "Named" - -class ServicePartitionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the service fabric service partition. - """ - - #: Indicates the partition status is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates that the partition is ready. This means that for a stateless service partition there - #: is at least one instance that is up and for a stateful service partition the number of ready - #: replicas is greater than or equal to the MinReplicaSetSize. The value is 1. - READY = "Ready" - #: Indicates that the partition is not ready. This status is returned when none of the other - #: states apply. The value is 2. - NOT_READY = "NotReady" - #: Indicates that the partition is in quorum loss. This means that number of replicas that are up - #: and participating in a replica set is less than MinReplicaSetSize for this partition. The value - #: is 3. - IN_QUORUM_LOSS = "InQuorumLoss" - #: Indicates that the partition is undergoing reconfiguration of its replica sets. This can happen - #: due to failover, upgrade, load balancing or addition or removal of replicas from the replica - #: set. The value is 4. - RECONFIGURING = "Reconfiguring" - #: Indicates that the partition is being deleted. The value is 5. - DELETING = "Deleting" - -class ServicePlacementPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of placement policy for a service fabric service. Following are the possible values. 
- """ - - #: Indicates the type of the placement policy is invalid. All Service Fabric enumerations have the - #: invalid type. The value is zero. - INVALID = "Invalid" - #: Indicates that the ServicePlacementPolicyDescription is of type - #: ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or - #: upgrade domain cannot be used for placement of this service. The value is 1. - INVALID_DOMAIN = "InvalidDomain" - #: Indicates that the ServicePlacementPolicyDescription is of type - #: ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the - #: service must be placed in a specific domain. The value is 2. - REQUIRE_DOMAIN = "RequireDomain" - #: Indicates that the ServicePlacementPolicyDescription is of type - #: ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the - #: Primary replica for the partitions of the service should be located in a particular domain as - #: an optimization. The value is 3. - PREFER_PRIMARY_DOMAIN = "PreferPrimaryDomain" - #: Indicates that the ServicePlacementPolicyDescription is of type - #: ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will - #: disallow placement of any two replicas from the same partition in the same domain at any time. - #: The value is 4. - REQUIRE_DOMAIN_DISTRIBUTION = "RequireDomainDistribution" - #: Indicates that the ServicePlacementPolicyDescription is of type - #: ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all - #: replicas of a particular partition of the service should be placed atomically. The value is 5. 
- NON_PARTIALLY_PLACE_SERVICE = "NonPartiallyPlaceService" - #: Indicates that the ServicePlacementPolicyDescription is of type - #: ServicePlacementAllowMultipleStatelessInstancesOnNodePolicyDescription, which indicates that - #: multiple stateless instances of a particular partition of the service can be placed on a node. - #: The value is 6. - ALLOW_MULTIPLE_STATELESS_INSTANCES_ON_NODE = "AllowMultipleStatelessInstancesOnNode" - -class ServiceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the application. - """ - - #: Indicates the service status is unknown. The value is zero. - UNKNOWN = "Unknown" - #: Indicates the service status is active. The value is 1. - ACTIVE = "Active" - #: Indicates the service is upgrading. The value is 2. - UPGRADING = "Upgrading" - #: Indicates the service is being deleted. The value is 3. - DELETING = "Deleting" - #: Indicates the service is being created. The value is 4. - CREATING = "Creating" - #: Indicates creation or deletion was terminated due to persistent failures. Another create/delete - #: request can be accepted. The value is 5. - FAILED = "Failed" - -class ServiceTypeRegistrationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the service type registration on the node. - """ - - #: Indicates the registration status is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: Indicates that the service type is disabled on this node. A type gets disabled when there are - #: too many failures of the code package hosting the service type. If the service type is - #: disabled, new replicas of that service type will not be placed on the node until it is enabled - #: again. The service type is enabled again after the process hosting it comes up and re-registers - #: the type or a preconfigured time interval has passed. The value is 1. 
- DISABLED = "Disabled" - #: Indicates that the service type is enabled on this node. Replicas of this service type can be - #: placed on this node when the code package registers the service type. The value is 2. - ENABLED = "Enabled" - #: Indicates that the service type is enabled and registered on the node by a code package. - #: Replicas of this service type can now be placed on this node. The value is 3. - REGISTERED = "Registered" - -class SettingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of the setting being given in value - """ - - #: The setting in clear text, will not be processed in any way and passed in as is. - CLEAR_TEXT = "ClearText" - #: The URI to a KeyVault secret version, will be resolved using the application's managed identity - #: (this type is only valid if the app was assigned a managed identity) before getting passed in. - KEY_VAULT_REFERENCE = "KeyVaultReference" - #: The reference to a SecretValue resource, will be resolved before getting passed in. - SECRET_VALUE_REFERENCE = "SecretValueReference" - -class SizeTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Volume size - """ - - SMALL = "Small" - MEDIUM = "Medium" - LARGE = "Large" - -class State(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The workflow state of the repair task. Valid initial states are Created, Claimed, and - Preparing. - """ - - #: Indicates that the repair task state is invalid. All Service Fabric enumerations have the - #: invalid value. - INVALID = "Invalid" - #: Indicates that the repair task has been created. - CREATED = "Created" - #: Indicates that the repair task has been claimed by a repair executor. - CLAIMED = "Claimed" - #: Indicates that the Repair Manager is preparing the system to handle the impact of the repair - #: task, usually by taking resources offline gracefully. - PREPARING = "Preparing" - #: Indicates that the repair task has been approved by the Repair Manager and is safe to execute. 
- APPROVED = "Approved" - #: Indicates that execution of the repair task is in progress. - EXECUTING = "Executing" - #: Indicates that the Repair Manager is restoring the system to its pre-repair state, usually by - #: bringing resources back online. - RESTORING = "Restoring" - #: Indicates that the repair task has completed, and no further state changes will occur. - COMPLETED = "Completed" - -class UpgradeDomainState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The state of the upgrade domain. - """ - - #: Indicates the upgrade domain state is invalid. All Service Fabric enumerations have the invalid - #: type. The value is zero. - INVALID = "Invalid" - #: The upgrade domain has not started upgrading yet. The value is 1. - PENDING = "Pending" - #: The upgrade domain is being upgraded but not complete yet. The value is 2. - IN_PROGRESS = "InProgress" - #: The upgrade domain has completed upgrade. The value is 3. - COMPLETED = "Completed" - -class UpgradeKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The kind of upgrade out of the following possible values. - """ - - #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade progresses one upgrade domain at a time. The value is 1. - ROLLING = "Rolling" - -class UpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The mode used to monitor health during a rolling upgrade. The values are UnmonitoredAuto, - UnmonitoredManual, and Monitored. - """ - - #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade will proceed automatically without performing any health monitoring. The value is - #: 1. - UNMONITORED_AUTO = "UnmonitoredAuto" - #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually - #: monitor health before proceeding. 
The value is 2. - UNMONITORED_MANUAL = "UnmonitoredManual" - #: The upgrade will stop after completing each upgrade domain and automatically monitor health - #: before proceeding. The value is 3. - MONITORED = "Monitored" - -class UpgradeSortOrder(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Defines the order in which an upgrade proceeds through the cluster. - """ - - #: Indicates that this sort order is not valid. All Service Fabric enumerations have the invalid - #: type. The value is 0. - INVALID = "Invalid" - #: Indicates that the default sort order (as specified in cluster manifest) will be used. The - #: value is 1. - DEFAULT = "Default" - #: Indicates that forward numeric sort order (UD names sorted as numbers) will be used. The value - #: is 2. - NUMERIC = "Numeric" - #: Indicates that forward lexicographical sort order (UD names sorted as strings) will be used. - #: The value is 3. - LEXICOGRAPHICAL = "Lexicographical" - #: Indicates that reverse numeric sort order (UD names sorted as numbers) will be used. The value - #: is 4. - REVERSE_NUMERIC = "ReverseNumeric" - #: Indicates that reverse lexicographical sort order (UD names sorted as strings) will be used. - #: The value is 5. - REVERSE_LEXICOGRAPHICAL = "ReverseLexicographical" - -class UpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The state of the upgrade domain. - """ - - #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade is rolling back to the previous version but is not complete yet. The value is 1. - ROLLING_BACK_IN_PROGRESS = "RollingBackInProgress" - #: The upgrade has finished rolling back. The value is 2. - ROLLING_BACK_COMPLETED = "RollingBackCompleted" - #: The current upgrade domain has finished upgrading. 
The overall upgrade is waiting for an - #: explicit move next request in UnmonitoredManual mode or performing health checks in Monitored - #: mode. The value is 3. - ROLLING_FORWARD_PENDING = "RollingForwardPending" - #: The upgrade is rolling forward to the target version but is not complete yet. The value is 4. - ROLLING_FORWARD_IN_PROGRESS = "RollingForwardInProgress" - #: The upgrade has finished rolling forward. The value is 5. - ROLLING_FORWARD_COMPLETED = "RollingForwardCompleted" - #: The upgrade has failed and is unable to execute FailureAction. The value is 6. - FAILED = "Failed" - -class UpgradeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The type of upgrade out of the following possible values. - """ - - #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. - #: The value is zero. - INVALID = "Invalid" - #: The upgrade progresses one upgrade domain at a time. The value is 1. - ROLLING = "Rolling" - #: The upgrade gets restarted by force. The value is 2. - ROLLING_FORCE_RESTART = "Rolling_ForceRestart" - -class VolumeProvider(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the provider of the volume resource. - """ - - #: Provides volumes that are backed by Azure Files. - SF_AZURE_FILE = "SFAzureFile" diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py index df6b66c53161..2d275dce36ff 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/__init__.py @@ -1,12 +1,14 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from ._service_fabric_client_apis_operations import ServiceFabricClientAPIsOperationsMixin from ._mesh_secret_operations import MeshSecretOperations from ._mesh_secret_value_operations import MeshSecretValueOperations from ._mesh_volume_operations import MeshVolumeOperations @@ -16,9 +18,9 @@ from ._mesh_code_package_operations import MeshCodePackageOperations from ._mesh_service_replica_operations import MeshServiceReplicaOperations from ._mesh_gateway_operations import MeshGatewayOperations +from ._service_fabric_client_ap_is_operations import ServiceFabricClientAPIsOperationsMixin __all__ = [ - 'ServiceFabricClientAPIsOperationsMixin', 'MeshSecretOperations', 'MeshSecretValueOperations', 'MeshVolumeOperations', @@ -28,4 +30,5 @@ 'MeshCodePackageOperations', 'MeshServiceReplicaOperations', 'MeshGatewayOperations', + 'ServiceFabricClientAPIsOperationsMixin', ] diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py index 5f6723d52b0a..9982364b8c00 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_application_operations.py @@ -1,338 +1,327 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshApplicationOperations(object): """MeshApplicationOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config def create_or_update( - self, - application_resource_name, # type: str - application_resource_description, # type: "_models.ApplicationResourceDescription" - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.ApplicationResourceDescription"] + self, application_resource_name, application_resource_description, custom_headers=None, raw=False, **operation_config): """Creates or updates a Application resource. - Creates a Application resource with the specified name, description and properties. If - Application resource with the same name exists, then it is updated with the specified - description and properties. + Creates a Application resource with the specified name, description and + properties. If Application resource with the same name exists, then it + is updated with the specified description and properties. :param application_resource_name: The identity of the application. :type application_resource_name: str - :param application_resource_description: Description for creating a Application resource. - :type application_resource_description: ~azure.servicefabric.models.ApplicationResourceDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError + :param application_resource_description: Description for creating a + Application resource. 
+ :type application_resource_description: + ~azure.servicefabric.models.ApplicationResourceDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationResourceDescription or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" + api_version = "6.4-preview" # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore + url = self.create_or_update.metadata['url'] path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] + query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; 
charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) - body_content_kwargs = {} # type: Dict[str, Any] + # Construct body body_content = self._serialize.body(application_resource_description, 'ApplicationResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) - + deserialized = self._deserialize('ApplicationResourceDescription', response) if response.status_code == 201: - deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) + deserialized = self._deserialize('ApplicationResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - create_or_update.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore + create_or_update.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} def get( - self, - application_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ApplicationResourceDescription" + self, application_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the Application resource with the given name. - Gets the information about the Application resource with the given name. The information - include the description and other properties of the Application. + Gets the information about the Application resource with the given + name. The information include the description and other properties of + the Application. :param application_resource_name: The identity of the application. :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationResourceDescription or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.ApplicationResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" + api_version = "6.4-preview" # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] + query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('ApplicationResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} def delete( - self, - application_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> None + self, application_resource_name, custom_headers=None, raw=False, **operation_config): """Deletes the Application resource. Deletes the Application resource identified by the name. :param application_resource_name: The identity of the application. :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" + api_version = "6.4-preview" # Construct URL - url = self.delete.metadata['url'] # type: ignore + url = self.delete.metadata['url'] path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] + query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) + raise models.FabricErrorException(self._deserialize, 
response) - delete.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} # type: ignore + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete.metadata = {'url': '/Resources/Applications/{applicationResourceName}'} def list( - self, - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedApplicationResourceDescriptionList" + self, custom_headers=None, raw=False, **operation_config): """Lists all the application resources. - Gets the information about all application resources in a given resource group. The information - include the description and other properties of the Application. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedApplicationResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedApplicationResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + Gets the information about all application resources in a given + resource group. The information include the description and other + properties of the Application. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedApplicationResourceDescriptionList or ClientRawResponse + if raw=true + :rtype: + ~azure.servicefabric.models.PagedApplicationResourceDescriptionList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" + api_version = "6.4-preview" # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] # Construct parameters - query_parameters = {} # type: Dict[str, Any] + query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedApplicationResourceDescriptionList', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('PagedApplicationResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Applications'} # type: ignore + list.metadata = {'url': '/Resources/Applications'} def get_upgrade_progress( - self, - application_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.ApplicationResourceUpgradeProgressInfo" - """Gets the progress of the latest upgrade performed on this application resource. + self, application_resource_name, custom_headers=None, raw=False, **operation_config): + """Gets the progress of the latest upgrade performed on this application + resource. - Gets the upgrade progress information about the Application resource with the given name. The - information include percentage of completion and other upgrade state information of the - Application resource. + Gets the upgrade progress information about the Application resource + with the given name. The information include percentage of completion + and other upgrade state information of the Application resource. :param application_resource_name: The identity of the application. :type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationResourceUpgradeProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationResourceUpgradeProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationResourceUpgradeProgressInfo or ClientRawResponse + if raw=true + :rtype: + ~azure.servicefabric.models.ApplicationResourceUpgradeProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationResourceUpgradeProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" + api_version = "7.0" # Construct URL - url = self.get_upgrade_progress.metadata['url'] # type: ignore + url = self.get_upgrade_progress.metadata['url'] path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] + query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('ApplicationResourceUpgradeProgressInfo', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationResourceUpgradeProgressInfo', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get_upgrade_progress.metadata = {'url': '/Resources/Applications/{applicationResourceName}/$/GetUpgradeProgress'} # type: ignore + get_upgrade_progress.metadata = {'url': '/Resources/Applications/{applicationResourceName}/$/GetUpgradeProgress'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py index 6ae7fd989ea0..40390ca54420 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_code_package_operations.py @@ -1,61 +1,48 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshCodePackageOperations(object): """MeshCodePackageOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". 
""" - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def get_container_logs( - self, - application_resource_name, # type: str - service_resource_name, # type: str - replica_name, # type: str - code_package_name, # type: str - tail=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ContainerLogs" + self, application_resource_name, service_resource_name, replica_name, code_package_name, tail=None, custom_headers=None, raw=False, **operation_config): """Gets the logs from the container. - Gets the logs for the container of the specified code package of the service replica. + Gets the logs for the container of the specified code package of the + service replica. :param application_resource_name: The identity of the application. :type application_resource_name: str @@ -65,55 +52,56 @@ def get_container_logs( :type replica_name: str :param code_package_name: The name of code package of the service. :type code_package_name: str - :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show - the complete logs. + :param tail: Number of lines to show from the end of the logs. Default + is 100. 'all' to show the complete logs. :type tail: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ContainerLogs, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ContainerLogs - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ContainerLogs or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ContainerLogs or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get_container_logs.metadata['url'] # type: ignore + url = self.get_container_logs.metadata['url'] path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), - 'codePackageName': self._serialize.url("code_package_name", code_package_name, 'str'), + 'codePackageName': self._serialize.url("code_package_name", code_package_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') if tail is not None: query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('ContainerLogs', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ContainerLogs', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get_container_logs.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}/CodePackages/{codePackageName}/Logs'} # type: ignore + get_container_logs.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}/CodePackages/{codePackageName}/Logs'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py index 3118dbad7a41..582ffc6910d6 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_gateway_operations.py @@ -1,279 +1,259 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshGatewayOperations(object): """MeshGatewayOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". 
""" - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def create_or_update( - self, - gateway_resource_name, # type: str - gateway_resource_description, # type: "_models.GatewayResourceDescription" - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.GatewayResourceDescription"] + self, gateway_resource_name, gateway_resource_description, custom_headers=None, raw=False, **operation_config): """Creates or updates a Gateway resource. - Creates a Gateway resource with the specified name, description and properties. If Gateway - resource with the same name exists, then it is updated with the specified description and - properties. Use Gateway resource to provide public connectivity to application services. + Creates a Gateway resource with the specified name, description and + properties. If Gateway resource with the same name exists, then it is + updated with the specified description and properties. Use Gateway + resource to provide public connectivity to application services. :param gateway_resource_name: The identity of the gateway. :type gateway_resource_name: str - :param gateway_resource_description: Description for creating a Gateway resource. - :type gateway_resource_description: ~azure.servicefabric.models.GatewayResourceDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: GatewayResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.GatewayResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError + :param gateway_resource_description: Description for creating a + Gateway resource. 
+ :type gateway_resource_description: + ~azure.servicefabric.models.GatewayResourceDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: GatewayResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.GatewayResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GatewayResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore + url = self.create_or_update.metadata['url'] path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + 
header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) - body_content_kwargs = {} # type: Dict[str, Any] + # Construct body body_content = self._serialize.body(gateway_resource_description, 'GatewayResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) - + deserialized = self._deserialize('GatewayResourceDescription', response) if response.status_code == 201: - deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) + deserialized = self._deserialize('GatewayResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - create_or_update.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore + create_or_update.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} def get( - self, - gateway_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) 
-> "_models.GatewayResourceDescription" + self, gateway_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the Gateway resource with the given name. - Gets the information about the Gateway resource with the given name. The information include - the description and other properties of the Gateway. + Gets the information about the Gateway resource with the given name. + The information include the description and other properties of the + Gateway. :param gateway_resource_name: The identity of the gateway. :type gateway_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: GatewayResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.GatewayResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: GatewayResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.GatewayResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewayResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('GatewayResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('GatewayResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} def delete( - self, - gateway_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> None + self, gateway_resource_name, custom_headers=None, raw=False, **operation_config): """Deletes the Gateway resource. Deletes the Gateway resource identified by the name. :param gateway_resource_name: The identity of the gateway. :type gateway_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.delete.metadata['url'] # type: ignore + url = self.delete.metadata['url'] path_format_arguments = { - 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True), + 'gatewayResourceName': self._serialize.url("gateway_resource_name", gateway_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, 
response) - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} # type: ignore + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete.metadata = {'url': '/Resources/Gateways/{gatewayResourceName}'} def list( - self, - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedGatewayResourceDescriptionList" + self, custom_headers=None, raw=False, **operation_config): """Lists all the gateway resources. - Gets the information about all gateway resources in a given resource group. The information - include the description and other properties of the Gateway. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedGatewayResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedGatewayResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + Gets the information about all gateway resources in a given resource + group. The information include the description and other properties of + the Gateway. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedGatewayResourceDescriptionList or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.PagedGatewayResourceDescriptionList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedGatewayResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedGatewayResourceDescriptionList', pipeline_response) + deserialized = None + if response.status_code == 
200: + deserialized = self._deserialize('PagedGatewayResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Gateways'} # type: ignore + list.metadata = {'url': '/Resources/Gateways'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py index 8cc6feb394db..d0eaba348d13 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_network_operations.py @@ -1,284 +1,262 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. 
import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshNetworkOperations(object): """MeshNetworkOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def create_or_update( - self, - network_resource_name, # type: str - name, # type: str - properties, # type: "_models.NetworkResourceProperties" - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.NetworkResourceDescription"] + self, network_resource_name, name, properties, custom_headers=None, raw=False, **operation_config): """Creates or updates a Network resource. - Creates a Network resource with the specified name, description and properties. If Network - resource with the same name exists, then it is updated with the specified description and - properties. 
Network resource provides connectivity between application services. + Creates a Network resource with the specified name, description and + properties. If Network resource with the same name exists, then it is + updated with the specified description and properties. Network resource + provides connectivity between application services. :param network_resource_name: The identity of the network. :type network_resource_name: str :param name: Name of the Network resource. :type name: str :param properties: Describes properties of a network resource. - :type properties: ~azure.servicefabric.models.NetworkResourceProperties - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NetworkResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NetworkResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError + :type properties: + ~azure.servicefabric.models.NetworkResourceProperties + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: NetworkResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NetworkResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NetworkResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _network_resource_description = _models.NetworkResourceDescription(name=name, properties=properties) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" + network_resource_description = models.NetworkResourceDescription(name=name, properties=properties) # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore + url = self.create_or_update.metadata['url'] path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), + 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) - 
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_network_resource_description, 'NetworkResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + # Construct body + body_content = self._serialize.body(network_resource_description, 'NetworkResourceDescription') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) - + deserialized = self._deserialize('NetworkResourceDescription', response) if response.status_code == 201: - deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) + deserialized = self._deserialize('NetworkResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - create_or_update.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore + create_or_update.metadata = {'url': '/Resources/Networks/{networkResourceName}'} def get( - self, - network_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) 
-> "_models.NetworkResourceDescription" + self, network_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the Network resource with the given name. - Gets the information about the Network resource with the given name. The information include - the description and other properties of the Network. + Gets the information about the Network resource with the given name. + The information include the description and other properties of the + Network. :param network_resource_name: The identity of the network. :type network_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NetworkResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NetworkResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: NetworkResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NetworkResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), + 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('NetworkResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NetworkResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Networks/{networkResourceName}'} def delete( - self, - network_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> None + self, network_resource_name, custom_headers=None, raw=False, **operation_config): """Deletes the Network resource. Deletes the Network resource identified by the name. :param network_resource_name: The identity of the network. :type network_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.delete.metadata['url'] # type: ignore + url = self.delete.metadata['url'] path_format_arguments = { - 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True), + 'networkResourceName': self._serialize.url("network_resource_name", network_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, 
response) - if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Networks/{networkResourceName}'} # type: ignore + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete.metadata = {'url': '/Resources/Networks/{networkResourceName}'} def list( - self, - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedNetworkResourceDescriptionList" + self, custom_headers=None, raw=False, **operation_config): """Lists all the network resources. - Gets the information about all network resources in a given resource group. The information - include the description and other properties of the Network. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedNetworkResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedNetworkResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + Gets the information about all network resources in a given resource + group. The information include the description and other properties of + the Network. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedNetworkResourceDescriptionList or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.PagedNetworkResourceDescriptionList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedNetworkResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedNetworkResourceDescriptionList', pipeline_response) + deserialized = None + if response.status_code == 
200: + deserialized = self._deserialize('PagedNetworkResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Networks'} # type: ignore + list.metadata = {'url': '/Resources/Networks'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py index af905f8c9172..0c980f03c04e 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_operations.py @@ -1,61 +1,50 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. 
import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshSecretOperations(object): """MeshSecretOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def create_or_update( - self, - secret_resource_name, # type: str - properties, # type: "_models.SecretResourceProperties" - name, # type: str - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.SecretResourceDescription"] + self, secret_resource_name, properties, name, custom_headers=None, raw=False, **operation_config): """Creates or updates a Secret resource. - Creates a Secret resource with the specified name, description and properties. If Secret - resource with the same name exists, then it is updated with the specified description and - properties. 
Once created, the kind and contentType of a secret resource cannot be updated. + Creates a Secret resource with the specified name, description and + properties. If Secret resource with the same name exists, then it is + updated with the specified description and properties. Once created, + the kind and contentType of a secret resource cannot be updated. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str @@ -63,222 +52,208 @@ def create_or_update( :type properties: ~azure.servicefabric.models.SecretResourceProperties :param name: Name of the Secret resource. :type name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SecretResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.SecretResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _secret_resource_description = _models.SecretResourceDescription(properties=properties, name=name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" + secret_resource_description = models.SecretResourceDescription(properties=properties, name=name) # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore + url = self.create_or_update.metadata['url'] path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) - body_content_kwargs 
= {} # type: Dict[str, Any] - body_content = self._serialize.body(_secret_resource_description, 'SecretResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + # Construct body + body_content = self._serialize.body(secret_resource_description, 'SecretResourceDescription') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('SecretResourceDescription', pipeline_response) - + deserialized = self._deserialize('SecretResourceDescription', response) if response.status_code == 201: - deserialized = self._deserialize('SecretResourceDescription', pipeline_response) + deserialized = self._deserialize('SecretResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - create_or_update.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore + create_or_update.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} def get( - self, - secret_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) 
-> "_models.SecretResourceDescription" + self, secret_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the Secret resource with the given name. - Gets the information about the Secret resource with the given name. The information include the - description and other properties of the Secret. + Gets the information about the Secret resource with the given name. The + information include the description and other properties of the Secret. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SecretResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.SecretResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('SecretResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SecretResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} def delete( - self, - secret_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> None + self, secret_resource_name, custom_headers=None, raw=False, **operation_config): """Deletes the Secret resource. Deletes the specified Secret resource and all of its named values. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.delete.metadata['url'] # type: ignore + url = self.delete.metadata['url'] path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) 
- if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} # type: ignore + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}'} def list( - self, - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedSecretResourceDescriptionList" + self, custom_headers=None, raw=False, **operation_config): """Lists all the secret resources. - Gets the information about all secret resources in a given resource group. The information - include the description and other properties of the Secret. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedSecretResourceDescriptionList, or the result of cls(response) + Gets the information about all secret resources in a given resource + group. The information include the description and other properties of + the Secret. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedSecretResourceDescriptionList or ClientRawResponse if + raw=true :rtype: ~azure.servicefabric.models.PagedSecretResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedSecretResourceDescriptionList', pipeline_response) + 
deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedSecretResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Secrets'} # type: ignore + list.metadata = {'url': '/Resources/Secrets'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py index cb62b6c4e48c..a9202b1f9140 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_secret_value_operations.py @@ -1,369 +1,341 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. 
import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshSecretValueOperations(object): """MeshSecretValueOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def add_value( - self, - secret_resource_name, # type: str - secret_value_resource_name, # type: str - name, # type: str - value=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.SecretValueResourceDescription"] - """Adds the specified value as a new version of the specified secret resource. - - Creates a new value of the specified secret resource. The name of the value is typically the - version identifier. Once created the value cannot be changed. 
+ self, secret_resource_name, secret_value_resource_name, name, value=None, custom_headers=None, raw=False, **operation_config): + """Adds the specified value as a new version of the specified secret + resource. + + Creates a new value of the specified secret resource. The name of the + value is typically the version identifier. Once created the value + cannot be changed. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. + :param secret_value_resource_name: The name of the secret resource + value which is typically the version identifier for the value. :type secret_value_resource_name: str :param name: Version identifier of the secret value. :type name: str :param value: The actual value of the secret. :type value: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretValueResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SecretValueResourceDescription or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SecretValueResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _secret_value_resource_description = _models.SecretValueResourceDescription(name=name, value=value) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" + secret_value_resource_description = models.SecretValueResourceDescription(name=name, value=value) # Construct URL - url = self.add_value.metadata['url'] # type: ignore + url = self.add_value.metadata['url'] path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + 
header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_secret_value_resource_description, 'SecretValueResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + # Construct body + body_content = self._serialize.body(secret_value_resource_description, 'SecretValueResourceDescription') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) - + deserialized = self._deserialize('SecretValueResourceDescription', response) if response.status_code == 201: - deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) + deserialized = self._deserialize('SecretValueResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - add_value.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore + add_value.metadata = {'url': 
'/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} def get( - self, - secret_resource_name, # type: str - secret_value_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.SecretValueResourceDescription" + self, secret_resource_name, secret_value_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the specified secret value resource. - Get the information about the specified named secret value resources. The information does not - include the actual value of the secret. + Get the information about the specified named secret value resources. + The information does not include the actual value of the secret. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. + :param secret_value_resource_name: The name of the secret resource + value which is typically the version identifier for the value. :type secret_value_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretValueResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretValueResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: SecretValueResourceDescription or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.SecretValueResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValueResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not 
in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('SecretValueResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SecretValueResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} def delete( - self, - secret_resource_name, # type: str - secret_value_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> None + self, secret_resource_name, secret_value_resource_name, custom_headers=None, raw=False, **operation_config): """Deletes the specified value of the named secret resource. - Deletes the secret value resource identified by the name. The name of the resource is typically - the version associated with that value. Deletion will fail if the specified value is in use. + Deletes the secret value resource identified by the name. The name of + the resource is typically the version associated with that value. + Deletion will fail if the specified value is in use. :param secret_resource_name: The name of the secret resource. :type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. + :param secret_value_resource_name: The name of the secret resource + value which is typically the version identifier for the value. 
:type secret_value_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.delete.metadata['url'] # type: ignore + url = self.delete.metadata['url'] path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + # Construct and 
send request request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) + raise models.FabricErrorException(self._deserialize, response) - delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} # type: ignore + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}'} def list( - self, - secret_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedSecretValueResourceDescriptionList" + self, secret_resource_name, custom_headers=None, raw=False, **operation_config): """List names of all values of the specified secret resource. - Gets information about all secret value resources of the specified secret resource. The - information includes the names of the secret value resources, but not the actual values. + Gets information about all secret value resources of the specified + secret resource. The information includes the names of the secret value + resources, but not the actual values. :param secret_resource_name: The name of the secret resource. 
:type secret_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedSecretValueResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedSecretValueResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedSecretValueResourceDescriptionList or ClientRawResponse + if raw=true + :rtype: + ~azure.servicefabric.models.PagedSecretValueResourceDescriptionList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSecretValueResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] path_format_arguments = { - 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), + 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters 
= {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedSecretValueResourceDescriptionList', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedSecretValueResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values'} # type: ignore + list.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values'} def show( - self, - secret_resource_name, # type: str - secret_value_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.SecretValue" + self, secret_resource_name, secret_value_resource_name, custom_headers=None, raw=False, **operation_config): """Lists the specified value of the secret resource. - Lists the decrypted value of the specified named value of the secret resource. This is a - privileged operation. + Lists the decrypted value of the specified named value of the secret + resource. This is a privileged operation. :param secret_resource_name: The name of the secret resource. 
:type secret_resource_name: str - :param secret_value_resource_name: The name of the secret resource value which is typically the - version identifier for the value. + :param secret_value_resource_name: The name of the secret resource + value which is typically the version identifier for the value. :type secret_value_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SecretValue, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SecretValue - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: SecretValue or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.SecretValue or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretValue"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.show.metadata['url'] # type: ignore + url = self.show.metadata['url'] path_format_arguments = { 'secretResourceName': self._serialize.url("secret_resource_name", secret_resource_name, 'str', skip_quote=True), - 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True), + 'secretValueResourceName': self._serialize.url("secret_value_resource_name", secret_value_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", 
api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('SecretValue', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SecretValue', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - show.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}/list_value'} # type: ignore + show.metadata = {'url': '/Resources/Secrets/{secretResourceName}/values/{secretValueResourceName}/list_value'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py index 61b6e9f5c35f..3f227dc25bf6 100644 --- 
a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_operations.py @@ -1,164 +1,155 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshServiceOperations(object): """MeshServiceOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. 
- :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def get( - self, - application_resource_name, # type: str - service_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceResourceDescription" + self, application_resource_name, service_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the Service resource with the given name. - Gets the information about the Service resource with the given name. The information include - the description and other properties of the Service. + Gets the information about the Service resource with the given name. + The information include the description and other properties of the + Service. :param application_resource_name: The identity of the application. :type application_resource_name: str :param service_resource_name: The identity of the service. :type service_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ServiceResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('ServiceResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}'} def list( - self, - application_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedServiceResourceDescriptionList" + self, application_resource_name, custom_headers=None, raw=False, **operation_config): """Lists all the service resources. - Gets the information about all services of an application resource. The information include the - description and other properties of the Service. + Gets the information about all services of an application resource. The + information include the description and other properties of the + Service. :param application_resource_name: The identity of the application. 
:type application_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServiceResourceDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedServiceResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedServiceResourceDescriptionList or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.PagedServiceResourceDescriptionList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] path_format_arguments = { - 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), + 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + 
header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedServiceResourceDescriptionList', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedServiceResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services'} # type: ignore + list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py index b89dd2c29a1f..29e0a80e299f 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_service_replica_operations.py @@ -1,60 +1,49 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft 
Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshServiceReplicaOperations(object): """MeshServiceReplicaOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. + :ivar api_version: The version of the API. 
This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def get( - self, - application_resource_name, # type: str - service_resource_name, # type: str - replica_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceReplicaDescription" + self, application_resource_name, service_resource_name, replica_name, custom_headers=None, raw=False, **operation_config): """Gets the given replica of the service of an application. - Gets the information about the service replica with the given name. The information include the - description and other properties of the service replica. + Gets the information about the service replica with the given name. The + information include the description and other properties of the service + replica. :param application_resource_name: The identity of the application. :type application_resource_name: str @@ -62,111 +51,109 @@ def get( :type service_resource_name: str :param replica_name: Service Fabric replica name. :type replica_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceReplicaDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceReplicaDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ServiceReplicaDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceReplicaDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceReplicaDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), - 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True), + 'replicaName': self._serialize.url("replica_name", replica_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, 
**operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('ServiceReplicaDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceReplicaDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}'} # type: ignore + get.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas/{replicaName}'} def list( - self, - application_resource_name, # type: str - service_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedServiceReplicaDescriptionList" + self, application_resource_name, service_resource_name, custom_headers=None, raw=False, **operation_config): """Lists all the replicas of a service. - Gets the information about all replicas of a service. The information include the description - and other properties of the service replica. + Gets the information about all replicas of a service. The information + include the description and other properties of the service replica. :param application_resource_name: The identity of the application. :type application_resource_name: str :param service_resource_name: The identity of the service. 
:type service_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServiceReplicaDescriptionList, or the result of cls(response) + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedServiceReplicaDescriptionList or ClientRawResponse if + raw=true :rtype: ~azure.servicefabric.models.PagedServiceReplicaDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceReplicaDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] path_format_arguments = { 'applicationResourceName': self._serialize.url("application_resource_name", application_resource_name, 'str', skip_quote=True), - 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True), + 'serviceResourceName': self._serialize.url("service_resource_name", service_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", 
accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedServiceReplicaDescriptionList', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedServiceReplicaDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas'} # type: ignore + list.metadata = {'url': '/Resources/Applications/{applicationResourceName}/Services/{serviceResourceName}/Replicas'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py index a9ebacf79033..7288b5ab1de8 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_mesh_volume_operations.py @@ -1,279 +1,256 @@ # coding=utf-8 # 
-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse +from msrest.pipeline import ClientRawResponse -from .. import models as _models +from .. import models -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MeshVolumeOperations(object): """MeshVolumeOperations operations. - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.servicefabric.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
+ :ivar api_version: The version of the API. This parameter is required and its value must be '6.4-preview'. Constant value: "6.4-preview". """ - models = _models + models = models def __init__(self, client, config, serializer, deserializer): + self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config + + self.config = config + self.api_version = "6.4-preview" def create_or_update( - self, - volume_resource_name, # type: str - volume_resource_description, # type: "_models.VolumeResourceDescription" - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.VolumeResourceDescription"] + self, volume_resource_name, volume_resource_description, custom_headers=None, raw=False, **operation_config): """Creates or updates a Volume resource. - Creates a Volume resource with the specified name, description and properties. If Volume - resource with the same name exists, then it is updated with the specified description and - properties. + Creates a Volume resource with the specified name, description and + properties. If Volume resource with the same name exists, then it is + updated with the specified description and properties. :param volume_resource_name: The identity of the volume. :type volume_resource_name: str - :param volume_resource_description: Description for creating a Volume resource. - :type volume_resource_description: ~azure.servicefabric.models.VolumeResourceDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: VolumeResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.VolumeResourceDescription or None - :raises: ~azure.core.exceptions.HttpResponseError + :param volume_resource_description: Description for creating a Volume + resource. 
+ :type volume_resource_description: + ~azure.servicefabric.models.VolumeResourceDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: VolumeResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.VolumeResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VolumeResourceDescription"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - # Construct URL - url = self.create_or_update.metadata['url'] # type: ignore + url = self.create_or_update.metadata['url'] path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 
'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) - body_content_kwargs = {} # type: Dict[str, Any] + # Construct body body_content = self._serialize.body(volume_resource_description, 'VolumeResourceDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) - + deserialized = self._deserialize('VolumeResourceDescription', response) if response.status_code == 201: - deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) + deserialized = self._deserialize('VolumeResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - create_or_update.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore + create_or_update.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} def get( - self, - volume_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) 
-> "_models.VolumeResourceDescription" + self, volume_resource_name, custom_headers=None, raw=False, **operation_config): """Gets the Volume resource with the given name. - Gets the information about the Volume resource with the given name. The information include the - description and other properties of the Volume. + Gets the information about the Volume resource with the given name. The + information include the description and other properties of the Volume. :param volume_resource_name: The identity of the volume. :type volume_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: VolumeResourceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.VolumeResourceDescription - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: VolumeResourceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.VolumeResourceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.VolumeResourceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.get.metadata['url'] # type: ignore + url = self.get.metadata['url'] path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('VolumeResourceDescription', pipeline_response) + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('VolumeResourceDescription', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - get.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore + get.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} def delete( - self, - volume_resource_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> None + self, volume_resource_name, custom_headers=None, raw=False, **operation_config): """Deletes the Volume resource. Deletes the Volume resource identified by the name. :param volume_resource_name: The identity of the volume. :type volume_resource_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.delete.metadata['url'] # type: ignore + url = self.delete.metadata['url'] path_format_arguments = { - 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True), + 'volumeResourceName': self._serialize.url("volume_resource_name", volume_resource_name, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) 
- if cls: - return cls(pipeline_response, None, {}) - - delete.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} # type: ignore + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete.metadata = {'url': '/Resources/Volumes/{volumeResourceName}'} def list( - self, - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedVolumeResourceDescriptionList" + self, custom_headers=None, raw=False, **operation_config): """Lists all the volume resources. - Gets the information about all volume resources in a given resource group. The information - include the description and other properties of the Volume. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedVolumeResourceDescriptionList, or the result of cls(response) + Gets the information about all volume resources in a given resource + group. The information include the description and other properties of + the Volume. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedVolumeResourceDescriptionList or ClientRawResponse if + raw=true :rtype: ~azure.servicefabric.models.PagedVolumeResourceDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedVolumeResourceDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - # Construct URL - url = self.list.metadata['url'] # type: ignore + url = self.list.metadata['url'] # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + # Construct and send request request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) + raise models.FabricErrorException(self._deserialize, response) - deserialized = self._deserialize('PagedVolumeResourceDescriptionList', pipeline_response) + 
deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedVolumeResourceDescriptionList', response) - if cls: - return cls(pipeline_response, deserialized, {}) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized - list.metadata = {'url': '/Resources/Volumes'} # type: ignore + list.metadata = {'url': '/Resources/Volumes'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py new file mode 100644 index 000000000000..9725c1215f5c --- /dev/null +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_ap_is_operations.py @@ -0,0 +1,16732 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.pipeline import ClientRawResponse +from .. import models + + +class ServiceFabricClientAPIsOperationsMixin(object): + + def get_cluster_manifest( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the Service Fabric cluster manifest. + + Get the Service Fabric cluster manifest. The cluster manifest contains + properties of the cluster that include different node types on the + cluster, + security configurations, fault, and upgrade domain topologies, etc. + These properties are specified as part of the ClusterConfig.JSON file + while deploying a stand-alone cluster. 
However, most of the information + in the cluster manifest + is generated internally by service fabric during cluster deployment in + other deployment scenarios (e.g. when using Azure portal). + The contents of the cluster manifest are for informational purposes + only and users are not expected to take a dependency on the format of + the file contents or its interpretation. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ClusterManifest or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterManifest or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_manifest.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('ClusterManifest', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'} + + def get_cluster_health( + self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric cluster. + + Use EventsHealthStateFilter to filter the collection of health events + reported on the cluster based on the health state. + Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter + to filter the collection of nodes and applications returned based on + their aggregated health state. + + :param nodes_health_state_filter: Allows filtering of the node health + state objects returned in the result of cluster health query + based on their health state. The possible values for this parameter + include integer value of one of the + following health states. Only nodes that match the filter are + returned. All nodes are used to evaluate the aggregated health state. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of nodes + with HealthState value of OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. 
+ - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type nodes_health_state_filter: int + :param applications_health_state_filter: Allows filtering of the + application health state objects returned in the result of cluster + health + query based on their health state. + The possible values for this parameter include integer value obtained + from members or bitwise operations + on members of HealthStateFilter enumeration. Only applications that + match the filter are returned. + All applications are used to evaluate the aggregated health state. If + not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of + applications with HealthState value of OK (2) and Warning (4) are + returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type applications_health_state_filter: int + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. 
The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param include_system_application_health_statistics: Indicates whether + the health statistics should include the fabric:/System application + health statistics. False by default. + If IncludeSystemApplicationHealthStatistics is set to true, the health + statistics include the entities that belong to the fabric:/System + application. + Otherwise, the query result includes health statistics only for user + applications. + The health statistics must be included in the query result for this + parameter to be applied. + :type include_system_application_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ClusterHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_health.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if nodes_health_state_filter is not None: + query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') + if applications_health_state_filter is not None: + query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if include_system_application_health_statistics is not None: + query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and 
send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_health.metadata = {'url': '/$/GetClusterHealth'} + + def get_cluster_health_using_policy( + self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, application_health_policy_map=None, cluster_health_policy=None, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric cluster using the specified policy. + + Use EventsHealthStateFilter to filter the collection of health events + reported on the cluster based on the health state. + Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter + to filter the collection of nodes and applications returned based on + their aggregated health state. + Use ClusterHealthPolicies to override the health policies used to + evaluate the health. + + :param nodes_health_state_filter: Allows filtering of the node health + state objects returned in the result of cluster health query + based on their health state. The possible values for this parameter + include integer value of one of the + following health states. Only nodes that match the filter are + returned. All nodes are used to evaluate the aggregated health state. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. 
+ For example, if the provided value is 6 then health state of nodes + with HealthState value of OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type nodes_health_state_filter: int + :param applications_health_state_filter: Allows filtering of the + application health state objects returned in the result of cluster + health + query based on their health state. + The possible values for this parameter include integer value obtained + from members or bitwise operations + on members of HealthStateFilter enumeration. Only applications that + match the filter are returned. + All applications are used to evaluate the aggregated health state. If + not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of + applications with HealthState value of OK (2) and Warning (4) are + returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. 
The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type applications_health_state_filter: int + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param include_system_application_health_statistics: Indicates whether + the health statistics should include the fabric:/System application + health statistics. False by default. 
+ If IncludeSystemApplicationHealthStatistics is set to true, the health + statistics include the entities that belong to the fabric:/System + application. + Otherwise, the query result includes health statistics only for user + applications. + The health statistics must be included in the query result for this + parameter to be applied. + :type include_system_application_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param application_health_policy_map: Defines a map that contains + specific application health policies for different applications. + Each entry specifies as key the application name and as value an + ApplicationHealthPolicy used to evaluate the application health. + If an application is not specified in the map, the application health + evaluation uses the ApplicationHealthPolicy found in its application + manifest or the default application health policy (if no health policy + is defined in the manifest). + The map is empty by default. + :type application_health_policy_map: + list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] + :param cluster_health_policy: Defines a health policy used to evaluate + the health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ClusterHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + cluster_health_policies = None + if application_health_policy_map is not None or cluster_health_policy is not None: + cluster_health_policies = models.ClusterHealthPolicies(application_health_policy_map=application_health_policy_map, cluster_health_policy=cluster_health_policy) + + api_version = "6.0" + + # Construct URL + url = self.get_cluster_health_using_policy.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if nodes_health_state_filter is not None: + query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') + if applications_health_state_filter is not None: + query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if include_system_application_health_statistics is not None: + query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 
'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if cluster_health_policies is not None: + body_content = self._serialize.body(cluster_health_policies, 'ClusterHealthPolicies') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'} + + def get_cluster_health_chunk( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric cluster using health chunks. + + Gets the health of a Service Fabric cluster using health chunks. + Includes the aggregated health state of the cluster, but none of the + cluster entities. + To expand the cluster health and get the health state of all or some of + the entities, use the POST URI and specify the cluster health chunk + query description. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ClusterHealthChunk or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterHealthChunk or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_health_chunk.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterHealthChunk', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} + + def get_cluster_health_chunk_using_policy_and_advanced_filters( + self, cluster_health_chunk_query_description=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric cluster using health chunks. + + Gets the health of a Service Fabric cluster using health chunks. The + health evaluation is done based on the input cluster health chunk query + description. + The query description allows users to specify health policies for + evaluating the cluster and its children. + Users can specify very flexible filters to select which cluster + entities to return. 
The selection can be done based on the entities + health state and based on the hierarchy. + The query can return multi-level children of the entities based on the + specified filters. For example, it can return one application with a + specified name, and for this application, return + only services that are in Error or Warning, and all partitions and + replicas for one of these services. + + :param cluster_health_chunk_query_description: Describes the cluster + and application health policies used to evaluate the cluster health + and the filters to select which cluster entities to be returned. + If the cluster health policy is present, it is used to evaluate the + cluster events and the cluster nodes. If not present, the health + evaluation uses the cluster health policy defined in the cluster + manifest or the default cluster health policy. + By default, each application is evaluated using its specific + application health policy, defined in the application manifest, or the + default health policy, if no policy is defined in manifest. + If the application health policy map is specified, and it has an entry + for an application, the specified application health policy + is used to evaluate the application health. + Users can specify very flexible filters to select which cluster + entities to include in response. The selection can be done based on + the entities health state and based on the hierarchy. + The query can return multi-level children of the entities based on the + specified filters. For example, it can return one application with a + specified name, and for this application, return + only services that are in Error or Warning, and all partitions and + replicas for one of these services. + :type cluster_health_chunk_query_description: + ~azure.servicefabric.models.ClusterHealthChunkQueryDescription + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ClusterHealthChunk or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterHealthChunk or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if cluster_health_chunk_query_description is not None: + body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterHealthChunk', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, 
response) + return client_raw_response + + return deserialized + get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} + + def report_cluster_health( + self, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric cluster. + + Sends a health report on a Service Fabric cluster. The report must + contain the information about the source of the health report and + property on which it is reported. + The report is sent to a Service Fabric gateway node, which forwards to + the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetClusterHealth and check that the report appears in the HealthEvents + section. + + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. + If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. 
+ If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. + This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_cluster_health.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response 
= self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} + + def get_provisioned_fabric_code_version_info_list( + self, code_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets a list of fabric code versions that are provisioned in a Service + Fabric cluster. + + Gets a list of information about fabric code versions that are + provisioned in the cluster. The parameter CodeVersion can be used to + optionally filter the output to only that particular version. + + :param code_version: The product version of Service Fabric. + :type code_version: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if code_version is not None: + query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[FabricCodeVersionInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} + + def get_provisioned_fabric_config_version_info_list( + self, config_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets a list of fabric config versions that are provisioned in a Service + Fabric cluster. + + Gets a list of information about fabric config versions that are + provisioned in the cluster. The parameter ConfigVersion can be used to + optionally filter the output to only that particular version. 
+ + :param config_version: The config version of Service Fabric. + :type config_version: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if config_version is not None: + query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[FabricConfigVersionInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + 
return client_raw_response + + return deserialized + get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} + + def get_cluster_upgrade_progress( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the progress of the current cluster upgrade. + + Gets the current progress of the ongoing cluster upgrade. If no upgrade + is currently in progress, get the last state of the previous cluster + upgrade. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ClusterUpgradeProgressObject or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_upgrade_progress.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterUpgradeProgressObject', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_upgrade_progress.metadata = {'url': '/$/GetUpgradeProgress'} + + def get_cluster_configuration( + self, configuration_api_version, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the Service Fabric standalone cluster configuration. + + The cluster configuration contains properties of the cluster that + include different node types on the cluster, + security configurations, fault, and upgrade domain topologies, etc. + + :param configuration_api_version: The API version of the Standalone + cluster json configuration. + :type configuration_api_version: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ClusterConfiguration or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterConfiguration or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_configuration.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterConfiguration', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'} + + def get_cluster_configuration_upgrade_status( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the cluster configuration upgrade status of a Service Fabric + standalone cluster. + + Get the cluster configuration upgrade status details of a Service + Fabric standalone cluster. + + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ClusterConfigurationUpgradeStatusInfo or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_configuration_upgrade_status.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'} + + def get_upgrade_orchestration_service_state( + self, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Get the service state of Service Fabric Upgrade Orchestration Service. + + Get the service state of Service Fabric Upgrade Orchestration Service. + This API is internally used for support purposes. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: UpgradeOrchestrationServiceState or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_upgrade_orchestration_service_state.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('UpgradeOrchestrationServiceState', response) + + if raw: + client_raw_response = 
ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'} + + def set_upgrade_orchestration_service_state( + self, timeout=60, service_state=None, custom_headers=None, raw=False, **operation_config): + """Update the service state of Service Fabric Upgrade Orchestration + Service. + + Update the service state of Service Fabric Upgrade Orchestration + Service. This API is internally used for support purposes. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param service_state: The state of Service Fabric Upgrade + Orchestration Service. + :type service_state: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: UpgradeOrchestrationServiceStateSummary or ClientRawResponse + if raw=true + :rtype: + ~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + upgrade_orchestration_service_state = models.UpgradeOrchestrationServiceState(service_state=service_state) + + api_version = "6.0" + + # Construct URL + url = self.set_upgrade_orchestration_service_state.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'} + + def provision_cluster( + self, timeout=60, code_file_path=None, cluster_manifest_file_path=None, custom_headers=None, raw=False, **operation_config): + """Provision the code or 
configuration packages of a Service Fabric + cluster. + + Validate and provision the code or configuration packages of a Service + Fabric cluster. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param code_file_path: The cluster code package file path. + :type code_file_path: str + :param cluster_manifest_file_path: The cluster manifest file path. + :type cluster_manifest_file_path: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + provision_fabric_description = models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path) + + api_version = "6.0" + + # Construct URL + url = self.provision_cluster.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(provision_fabric_description, 'ProvisionFabricDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, 
stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + provision_cluster.metadata = {'url': '/$/Provision'} + + def unprovision_cluster( + self, timeout=60, code_version=None, config_version=None, custom_headers=None, raw=False, **operation_config): + """Unprovision the code or configuration packages of a Service Fabric + cluster. + + It is supported to unprovision code and configuration separately. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param code_version: The cluster code package version. + :type code_version: str + :param config_version: The cluster manifest version. + :type config_version: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + unprovision_fabric_description = models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version) + + api_version = "6.0" + + # Construct URL + url = self.unprovision_cluster.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(unprovision_fabric_description, 'UnprovisionFabricDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + unprovision_cluster.metadata = {'url': '/$/Unprovision'} + + def rollback_cluster_upgrade( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Roll back the upgrade of a Service Fabric cluster. + + Roll back the code or configuration upgrade of a Service Fabric + cluster. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.rollback_cluster_upgrade.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'} + + def resume_cluster_upgrade( + self, upgrade_domain, timeout=60, custom_headers=None, raw=False, **operation_config): + """Make the cluster upgrade move on to the next upgrade domain. + + Make the cluster code or configuration upgrade move on to the next + upgrade domain if appropriate. + + :param upgrade_domain: The next upgrade domain for this cluster + upgrade. + :type upgrade_domain: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + resume_cluster_upgrade_description = models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain) + + api_version = "6.0" + + # Construct URL + url = self.resume_cluster_upgrade.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'} + + def start_cluster_upgrade( + self, start_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Start upgrading the code or configuration version of a Service Fabric + cluster. 
+ + Validate the supplied upgrade parameters and start upgrading the code + or configuration version of a Service Fabric cluster if the parameters + are valid. + + :param start_cluster_upgrade_description: Describes the parameters for + starting a cluster upgrade. + :type start_cluster_upgrade_description: + ~azure.servicefabric.models.StartClusterUpgradeDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_cluster_upgrade.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise 
models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_cluster_upgrade.metadata = {'url': '/$/Upgrade'} + + def start_cluster_configuration_upgrade( + self, cluster_configuration_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Start upgrading the configuration of a Service Fabric standalone + cluster. + + Validate the supplied configuration upgrade parameters and start + upgrading the cluster configuration if the parameters are valid. + + :param cluster_configuration_upgrade_description: Parameters for a + standalone cluster configuration upgrade. + :type cluster_configuration_upgrade_description: + ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_cluster_configuration_upgrade.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'} + + def update_cluster_upgrade( + self, update_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Update the upgrade parameters of a Service Fabric cluster upgrade. + + Update the upgrade parameters used during a Service Fabric cluster + upgrade. + + :param update_cluster_upgrade_description: Parameters for updating a + cluster upgrade. + :type update_cluster_upgrade_description: + ~azure.servicefabric.models.UpdateClusterUpgradeDescription + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.update_cluster_upgrade.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + update_cluster_upgrade.metadata = {'url': '/$/UpdateUpgrade'} + + def get_aad_metadata( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the Azure Active Directory metadata used for secured connection to + cluster. 
+ + Gets the Azure Active Directory metadata used for secured connection to + cluster. + This API is not supposed to be called separately. It provides + information needed to set up an Azure Active Directory secured + connection with a Service Fabric cluster. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: AadMetadataObject or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.AadMetadataObject or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_aad_metadata.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('AadMetadataObject', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + 
return client_raw_response + + return deserialized + get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'} + + def get_cluster_version( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the current Service Fabric cluster version. + + If a cluster upgrade is happening, then this API will return the lowest + (older) version of the current and target cluster runtime versions. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ClusterVersion or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterVersion or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_cluster_version.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + 
deserialized = self._deserialize('ClusterVersion', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_version.metadata = {'url': '/$/GetClusterVersion'} + + def get_cluster_load( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the load of a Service Fabric cluster. + + Retrieves the load information of a Service Fabric cluster for all the + metrics that have load or capacity defined. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ClusterLoadInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ClusterLoadInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_cluster_load.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ClusterLoadInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_load.metadata = {'url': '/$/GetLoadInformation'} + + def toggle_verbose_service_placement_health_reporting( + self, enabled, timeout=60, custom_headers=None, raw=False, **operation_config): + """Changes the verbosity of service placement health reporting. + + If verbosity is set to true, then detailed health reports will be + generated when replicas cannot be placed or dropped. + If verbosity is set to false, then no health reports will be generated + when replicas cannot be placed or dropped. + + :param enabled: The verbosity of service placement health reporting. + :type enabled: bool + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.toggle_verbose_service_placement_health_reporting.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Enabled'] = self._serialize.query("enabled", enabled, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + toggle_verbose_service_placement_health_reporting.metadata = {'url': '/$/ToggleVerboseServicePlacementHealthReporting'} + + def get_node_info_list( + self, continuation_token=None, node_status_filter="default", max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of nodes in the Service Fabric cluster. 
+ + The response includes the name, status, ID, health, uptime, and other + details about the nodes. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param node_status_filter: Allows filtering the nodes based on the + NodeStatus. Only the nodes that are matching the specified filter + value will be returned. The filter value can be one of the following. + Possible values include: 'default', 'all', 'up', 'down', 'enabling', + 'disabling', 'disabled', 'unknown', 'removed' + :type node_status_filter: str or + ~azure.servicefabric.models.NodeStatusFilter + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedNodeInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedNodeInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.3" + + # Construct URL + url = self.get_node_info_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if node_status_filter is not None: + query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedNodeInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + 
get_node_info_list.metadata = {'url': '/Nodes'} + + def get_node_info( + self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about a specific node in the Service Fabric + cluster. + + The response includes the name, status, ID, health, uptime, and other + details about the node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: NodeInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NodeInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_node_info.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in 
[200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NodeInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_node_info.metadata = {'url': '/Nodes/{nodeName}'} + + def get_node_health( + self, node_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric node. + + Gets the health of a Service Fabric node. Use EventsHealthStateFilter + to filter the collection of health events reported on the node based on + the health state. If the node that you specify by name does not exist + in the health store, this returns an error. + + :param node_name: The name of the node. + :type node_name: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. 
+ - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: NodeHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NodeHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_node_health.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in 
[200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NodeHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} + + def get_node_health_using_policy( + self, node_name, events_health_state_filter=0, cluster_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric node, by using the specified health + policy. + + Gets the health of a Service Fabric node. Use EventsHealthStateFilter + to filter the collection of health events reported on the node based on + the health state. Use ClusterHealthPolicy in the POST body to override + the health policies used to evaluate the health. If the node that you + specify by name does not exist in the health store, this returns an + error. + + :param node_name: The name of the node. + :type node_name: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. 
+ - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param cluster_health_policy: Describes the health policies used to + evaluate the health of a cluster or node. If not present, the health + evaluation uses the health policy from cluster manifest or the default + health policy. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: NodeHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NodeHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_node_health_using_policy.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if cluster_health_policy is not None: + body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NodeHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} + + def report_node_health( + self, 
node_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric node. + + Reports health state of the specified Service Fabric node. The report + must contain the information about the source of the health report and + property on which it is reported. + The report is sent to a Service Fabric gateway node, which forwards to + the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetNodeHealth and check that the report appears in the HealthEvents + section. + + :param node_name: The name of the node. + :type node_name: str + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. + If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. + If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. 
+ This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_node_health.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'} + + def get_node_load_info( + self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the load information of a Service Fabric node. + + Retrieves the load information of a Service Fabric node for all the + metrics that have load or capacity defined. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: NodeLoadInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NodeLoadInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_node_load_info.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NodeLoadInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} + + def disable_node( + self, node_name, timeout=60, deactivation_intent=None, custom_headers=None, raw=False, **operation_config): + """Deactivate a Service Fabric cluster node with the specified + deactivation intent. + + Deactivate a Service Fabric cluster node with the specified + deactivation intent. 
Once the deactivation is in progress, the + deactivation intent can be increased, but not decreased (for example, a + node that is deactivated with the Pause intent can be deactivated + further with Restart, but not the other way around. Nodes may be + reactivated using the Activate a node operation any time after they are + deactivated. If the deactivation is not complete, this will cancel the + deactivation. A node that goes down and comes back up while deactivated + will still need to be reactivated before services will be placed on + that node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param deactivation_intent: Describes the intent or reason for + deactivating the node. The possible values are following. Possible + values include: 'Pause', 'Restart', 'RemoveData' + :type deactivation_intent: str or + ~azure.servicefabric.models.DeactivationIntent + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + deactivation_intent_description = models.DeactivationIntentDescription(deactivation_intent=deactivation_intent) + + api_version = "6.0" + + # Construct URL + url = self.disable_node.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(deactivation_intent_description, 'DeactivationIntentDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'} + + def enable_node( + self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Activate a Service Fabric cluster node that is currently deactivated. + + Activates a Service Fabric cluster node that is currently deactivated. + Once activated, the node will again become a viable target for placing + new replicas, and any deactivated replicas remaining on the node will + be reactivated. 
+ + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.enable_node.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_node.metadata = {'url': '/Nodes/{nodeName}/$/Activate'} + + def remove_node_state( + self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Notifies Service Fabric that the persisted state on a node has been + 
permanently removed or lost. + + This implies that it is not possible to recover the persisted state of + that node. This generally happens if a hard disk has been wiped clean, + or if a hard disk crashes. The node has to be down for this operation + to be successful. This operation lets Service Fabric know that the + replicas on that node no longer exist, and that Service Fabric should + stop waiting for those replicas to come back up. Do not run this cmdlet + if the state on the node has not been removed and the node can come + back up with its state intact. Starting from Service Fabric 6.5, in + order to use this API for seed nodes, please change the seed nodes to + regular (non-seed) nodes and then invoke this API to remove the node + state. If the cluster is running on Azure, after the seed node goes + down, Service Fabric will try to change it to a non-seed node + automatically. To make this happen, make sure the number of non-seed + nodes in the primary node type is no less than the number of Down seed + nodes. If necessary, add more nodes to the primary node type to achieve + this. For standalone cluster, if the Down seed node is not expected to + come back up with its state intact, please remove the node from the + cluster, see + https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-windows-server-add-remove-nodes. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.remove_node_state.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'} + + def restart_node( + self, node_name, node_instance_id="0", timeout=60, create_fabric_dump="False", custom_headers=None, raw=False, **operation_config): + """Restarts a Service Fabric cluster node. + + Restarts a Service Fabric cluster node that is already started. + + :param node_name: The name of the node. + :type node_name: str + :param node_instance_id: The instance ID of the target node. If + instance ID is specified the node is restarted only if it matches with + the current instance of the node. A default value of "0" would match + any instance ID. The instance ID can be obtained using get node query. + :type node_instance_id: str + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param create_fabric_dump: Specify True to create a dump of the fabric + node process. This is case-sensitive. Possible values include: + 'False', 'True' + :type create_fabric_dump: str or + ~azure.servicefabric.models.CreateFabricDump + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + restart_node_description = models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump) + + api_version = "6.0" + + # Construct URL + url = self.restart_node.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(restart_node_description, 'RestartNodeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in 
[200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'} + + def remove_configuration_overrides( + self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Removes configuration overrides on the specified node. + + This api allows removing all existing configuration overrides on + specified node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "7.0" + + # Construct URL + url = self.remove_configuration_overrides.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + remove_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/RemoveConfigurationOverrides'} + + def get_configuration_overrides( + self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of configuration overrides on the specified node. + + This api allows getting all existing configuration overrides on the + specified node. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ConfigParameterOverride] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "7.0" + + # Construct URL + url = self.get_configuration_overrides.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ConfigParameterOverride]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/GetConfigurationOverrides'} + + def add_configuration_parameter_overrides( + self, node_name, config_parameter_override_list, force=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Adds the 
list of configuration overrides on the specified node. + + This api allows adding all existing configuration overrides on the + specified node. + + :param node_name: The name of the node. + :type node_name: str + :param config_parameter_override_list: Description for adding list of + configuration overrides. + :type config_parameter_override_list: + list[~azure.servicefabric.models.ConfigParameterOverride] + :param force: Force adding configuration overrides on specified nodes. + :type force: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "7.0" + + # Construct URL + url = self.add_configuration_parameter_overrides.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force is not None: + query_parameters['Force'] = self._serialize.query("force", force, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(config_parameter_override_list, '[ConfigParameterOverride]') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'} + + def remove_node_tags( + self, node_name, node_tags, custom_headers=None, raw=False, **operation_config): + """Removes the list of tags from the specified node. + + This api allows removing set of tags from the specified node. + + :param node_name: The name of the node. + :type node_name: str + :param node_tags: Description for adding list of node tags. 
+ :type node_tags: list[str] + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "7.0" + + # Construct URL + url = self.remove_node_tags.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(node_tags, '[str]') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + remove_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeTags'} + + def add_node_tags( + self, node_name, node_tags, custom_headers=None, raw=False, **operation_config): + """Adds the list of tags on the specified node. + + This api allows adding tags to the specified node. + + :param node_name: The name of the node. + :type node_name: str + :param node_tags: Description for adding list of node tags. 
+ :type node_tags: list[str] + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "7.2" + + # Construct URL + url = self.add_node_tags.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(node_tags, '[str]') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + add_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/AddNodeTags'} + + def get_application_type_info_list( + self, application_type_definition_kind_filter=0, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of application types in the Service Fabric cluster. + + Returns the information about the application types that are + provisioned or in the process of being provisioned in the Service + Fabric cluster. 
Each version of an application type is returned as one + application type. The response includes the name, version, status, and + other details about the application type. This is a paged query, + meaning that if not all of the application types fit in a page, one + page of results is returned as well as a continuation token, which can + be used to get the next page. For example, if there are 10 application + types but a page only fits the first three application types, or if max + results is set to 3, then three is returned. To access the rest of the + results, retrieve subsequent pages by using the returned continuation + token in the next query. An empty continuation token is returned if + there are no subsequent pages. + + :param application_type_definition_kind_filter: Used to filter on + ApplicationTypeDefinitionKind which is the mechanism used to define a + Service Fabric application type. + - Default - Default value, which performs the same function as + selecting "All". The value is 0. + - All - Filter that matches input with any + ApplicationTypeDefinitionKind value. The value is 65535. + - ServiceFabricApplicationPackage - Filter that matches input with + ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage. + The value is 1. + - Compose - Filter that matches input with + ApplicationTypeDefinitionKind value Compose. The value is 2. + :type application_type_definition_kind_filter: int + :param exclude_application_parameters: The flag that specifies whether + application parameters will be excluded from the result. + :type exclude_application_parameters: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. 
If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedApplicationTypeInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_type_info_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if application_type_definition_kind_filter is not None: + query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedApplicationTypeInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, 
response) + return client_raw_response + + return deserialized + get_application_type_info_list.metadata = {'url': '/ApplicationTypes'} + + def get_application_type_info_list_by_name( + self, application_type_name, application_type_version=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of application types in the Service Fabric cluster + matching exactly the specified name. + + Returns the information about the application types that are + provisioned or in the process of being provisioned in the Service + Fabric cluster. These results are of application types whose name match + exactly the one specified as the parameter, and which comply with the + given query parameters. All versions of the application type matching + the application type name are returned, with each version returned as + one application type. The response includes the name, version, status, + and other details about the application type. This is a paged query, + meaning that if not all of the application types fit in a page, one + page of results is returned as well as a continuation token, which can + be used to get the next page. For example, if there are 10 application + types but a page only fits the first three application types, or if max + results is set to 3, then three is returned. To access the rest of the + results, retrieve subsequent pages by using the returned continuation + token in the next query. An empty continuation token is returned if + there are no subsequent pages. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param exclude_application_parameters: The flag that specifies whether + application parameters will be excluded from the result. 
+ :type exclude_application_parameters: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedApplicationTypeInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_type_info_list_by_name.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if application_type_version is not None: + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + 
deserialized = self._deserialize('PagedApplicationTypeInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'} + + def provision_application_type( + self, provision_application_type_description_base_required_body_param, timeout=60, custom_headers=None, raw=False, **operation_config): + """Provisions or registers a Service Fabric application type with the + cluster using the '.sfpkg' package in the external store or using the + application package in the image store. + + Provisions a Service Fabric application type with the cluster. The + provision is required before any new applications can be instantiated. + The provision operation can be performed either on the application + package specified by the relativePathInImageStore, or by using the URI + of the external '.sfpkg'. + + :param + provision_application_type_description_base_required_body_param: The + base type of provision application type description which supports + either image store-based provision or external store-based provision. + :type provision_application_type_description_base_required_body_param: + ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.provision_application_type.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'} + + def unprovision_application_type( + self, application_type_name, application_type_version, timeout=60, async_parameter=None, custom_headers=None, raw=False, **operation_config): + """Removes or unregisters a Service Fabric application type from the + cluster. + + This operation can only be performed if all application instances of + the application type have been deleted. Once the application type is + unregistered, no new application instances can be created for this + particular application type. + + :param application_type_name: The name of the application type. 
+ :type application_type_name: str + :param application_type_version: The version of the application type + as defined in the application manifest. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param async_parameter: The flag indicating whether or not unprovision + should occur asynchronously. When set to true, the unprovision + operation returns when the request is accepted by the system, and the + unprovision operation continues without any timeout limit. The default + value is false. However, we recommend setting it to true for large + application packages that were provisioned. + :type async_parameter: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + unprovision_application_type_description_info = models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter) + + api_version = "6.0" + + # Construct URL + url = self.unprovision_application_type.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'} + + def get_service_type_info_list( + self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list containing the information about service types that are + 
supported by a provisioned application type in a Service Fabric + cluster. + + Gets the list containing the information about service types that are + supported by a provisioned application type in a Service Fabric + cluster. The provided application type must exist. Otherwise, a 404 + status is returned. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ServiceTypeInfo] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_type_info_list.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ServiceTypeInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'} + + def get_service_type_info_by_name( + self, application_type_name, application_type_version, service_type_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about a specific service type that is supported by + a provisioned 
application type in a Service Fabric cluster. + + Gets the information about a specific service type that is supported by + a provisioned application type in a Service Fabric cluster. The + provided application type must exist. Otherwise, a 404 status is + returned. A 204 response is returned if the specified service type is + not found in the cluster. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param service_type_name: Specifies the name of a Service Fabric + service type. + :type service_type_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ServiceTypeInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceTypeInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_type_info_by_name.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceTypeInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} + + def get_service_manifest( + self, application_type_name, application_type_version, service_manifest_name, timeout=60, custom_headers=None, 
raw=False, **operation_config): + """Gets the manifest describing a service type. + + Gets the manifest describing a service type. The response contains the + service manifest XML as a string. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param service_manifest_name: The name of a service manifest + registered as part of an application type in a Service Fabric cluster. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ServiceTypeManifest or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceTypeManifest or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_manifest.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceTypeManifest', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'} + + def get_deployed_service_type_info_list( + self, node_name, application_id, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Gets the list containing the information about service types from the + applications deployed on a node in a Service Fabric cluster. + + Gets the list containing the information about service types from the + applications deployed on a node in a Service Fabric cluster. The + response includes the name of the service type, its registration + status, the code package that registered it and activation ID of the + service package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_manifest_name: The name of the service manifest to + filter the list of deployed service type information. If specified, + the response will only contain the information about service types + that are defined in this service manifest. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_service_type_info_list.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServiceTypeInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'} + + def get_deployed_service_type_info_by_name( + self, node_name, application_id, service_type_name, service_manifest_name=None, timeout=60, 
custom_headers=None, raw=False, **operation_config): + """Gets the information about a specified service type of the application + deployed on a node in a Service Fabric cluster. + + Gets the list containing the information about a specific service type + from the applications deployed on a node in a Service Fabric cluster. + The response includes the name of the service type, its registration + status, the code package that registered it and activation ID of the + service package. Each entry represents one activation of a service + type, differentiated by the activation ID. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_type_name: Specifies the name of a Service Fabric + service type. + :type service_type_name: str + :param service_manifest_name: The name of the service manifest to + filter the list of deployed service type information. If specified, + the response will only contain the information about service types + that are defined in this service manifest. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_service_type_info_by_name.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServiceTypeInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'} + + def 
create_application( + self, application_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Creates a Service Fabric application. + + Creates a Service Fabric application using the specified description. + + :param application_description: Description for creating an + application. + :type application_description: + ~azure.servicefabric.models.ApplicationDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.create_application.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(application_description, 'ApplicationDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise 
models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + create_application.metadata = {'url': '/Applications/$/Create'} + + def delete_application( + self, application_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes an existing Service Fabric application. + + An application must be created before it can be deleted. Deleting an + application will delete all services that are part of that application. + By default, Service Fabric will try to close service replicas in a + graceful manner and then delete the service. However, if a service is + having issues closing the replica gracefully, the delete operation may + take a long time or get stuck. Use the optional ForceRemove flag to + skip the graceful close sequence and forcefully delete the application + and all of its services. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param force_remove: Remove a Service Fabric application or service + forcefully without going through the graceful shutdown sequence. This + parameter can be used to forcefully delete an application or service + for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_application.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'} + + def get_application_load_info( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets load information about a Service Fabric application. 
+ + Returns the load information about the application that was created or + in the process of being created in the Service Fabric cluster and whose + name matches the one specified as the parameter. The response includes + the name, minimum nodes, maximum nodes, the number of nodes the + application is occupying currently, and application load metric + information about the application. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationLoadInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ApplicationLoadInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_load_info.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationLoadInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'} + + def get_application_info_list( + self, application_definition_kind_filter=0, application_type_name=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of applications created in the Service Fabric cluster + that match the specified filters. 
+ + Gets the information about the applications that were created or in the + process of being created in the Service Fabric cluster and match the + specified filters. The response includes the name, type, status, + parameters, and other details about the application. If the + applications do not fit in a page, one page of results is returned as + well as a continuation token, which can be used to get the next page. + Filters ApplicationTypeName and ApplicationDefinitionKindFilter cannot + be specified at the same time. + + :param application_definition_kind_filter: Used to filter on + ApplicationDefinitionKind, which is the mechanism used to define a + Service Fabric application. + - Default - Default value, which performs the same function as + selecting "All". The value is 0. + - All - Filter that matches input with any ApplicationDefinitionKind + value. The value is 65535. + - ServiceFabricApplicationDescription - Filter that matches input with + ApplicationDefinitionKind value ServiceFabricApplicationDescription. + The value is 1. + - Compose - Filter that matches input with ApplicationDefinitionKind + value Compose. The value is 2. + :type application_definition_kind_filter: int + :param application_type_name: The application type name used to filter + the applications to query for. This value should not contain the + application type version. + :type application_type_name: str + :param exclude_application_parameters: The flag that specifies whether + application parameters will be excluded from the result. + :type exclude_application_parameters: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. 
If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedApplicationInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedApplicationInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.1" + + # Construct URL + url = self.get_application_info_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if application_definition_kind_filter is not None: + query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int') + if application_type_name is not None: + query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('PagedApplicationInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_info_list.metadata = {'url': '/Applications'} + + def get_application_info( + self, application_id, exclude_application_parameters=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets information about a Service Fabric application. + + Returns the information about the application that was created or in + the process of being created in the Service Fabric cluster and whose + name matches the one specified as the parameter. The response includes + the name, type, status, parameters, and other details about the + application. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param exclude_application_parameters: The flag that specifies whether + application parameters will be excluded from the result. + :type exclude_application_parameters: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ApplicationInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_info.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if exclude_application_parameters is not None: + query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_info.metadata = {'url': '/Applications/{applicationId}'} + + def get_application_health( + self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Gets the health of the service fabric application. + + Returns the heath state of the service fabric application. The response + reports either Ok, Error or Warning health state. If the entity is not + found in the health store, it will return Error. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. 
+ :type events_health_state_filter: int + :param deployed_applications_health_state_filter: Allows filtering of + the deployed applications health state objects returned in the result + of application health query based on their health state. + The possible values for this parameter include integer value of one of + the following health states. Only deployed applications that match the + filter will be returned. + All deployed applications are used to evaluate the aggregated health + state. If not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a + combination of these values, obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of deployed + applications with HealthState value of OK (2) and Warning (4) are + returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type deployed_applications_health_state_filter: int + :param services_health_state_filter: Allows filtering of the services + health state objects returned in the result of services health query + based on their health state. + The possible values for this parameter include integer value of one of + the following health states. + Only services that match the filter are returned. All services are + used to evaluate the aggregated health state. + If not specified, all entries are returned. 
The state values are + flag-based enumeration, so the value could be a combination of these + values, + obtained using bitwise 'OR' operator. For example, if the provided + value is 6 then health state of services with HealthState value of OK + (2) and Warning (4) will be returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type services_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ApplicationHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_health.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_applications_health_state_filter is not None: + query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') + if services_health_state_filter is not None: + query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, 
response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} + + def get_application_health_using_policy( + self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric application using the specified + policy. + + Gets the health of a Service Fabric application. Use + EventsHealthStateFilter to filter the collection of health events + reported on the node based on the health state. Use + ClusterHealthPolicies to override the health policies used to evaluate + the health. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. 
For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param deployed_applications_health_state_filter: Allows filtering of + the deployed applications health state objects returned in the result + of application health query based on their health state. + The possible values for this parameter include integer value of one of + the following health states. Only deployed applications that match the + filter will be returned. + All deployed applications are used to evaluate the aggregated health + state. If not specified, all entries are returned. + The state values are flag-based enumeration, so the value could be a + combination of these values, obtained using bitwise 'OR' operator. + For example, if the provided value is 6 then health state of deployed + applications with HealthState value of OK (2) and Warning (4) are + returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. 
The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type deployed_applications_health_state_filter: int + :param services_health_state_filter: Allows filtering of the services + health state objects returned in the result of services health query + based on their health state. + The possible values for this parameter include integer value of one of + the following health states. + Only services that match the filter are returned. All services are + used to evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, + obtained using bitwise 'OR' operator. For example, if the provided + value is 6 then health state of services with HealthState value of OK + (2) and Warning (4) will be returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type services_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param application_health_policy: Describes the health policies used + to evaluate the health of an application or one of its children. 
+ If not present, the health evaluation uses the health policy from + application manifest or the default health policy. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ApplicationHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_health_using_policy.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_applications_health_state_filter is not None: + query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') + if services_health_state_filter is not None: + query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') + if exclude_health_statistics 
is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} + + def report_application_health( + self, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric application. + + Reports health state of the specified Service Fabric application. The + report must contain the information about the source of the health + report and property on which it is reported. + The report is sent to a Service Fabric gateway Application, which + forwards to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. 
+ For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, get + application health and check that the report appears in the + HealthEvents section. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. + If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. + If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. + This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. 
+ By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_application_health.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + 
+ if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} + + def start_application_upgrade( + self, application_id, application_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Starts upgrading an application in the Service Fabric cluster. + + Validates the supplied application upgrade parameters and starts + upgrading the application if the parameters are valid. + Note, + [ApplicationParameter](https://docs.microsoft.com/dotnet/api/system.fabric.description.applicationdescription.applicationparameters)s + are not preserved across an application upgrade. + In order to preserve current application parameters, the user should + get the parameters using [GetApplicationInfo](./GetApplicationInfo.md) + operation first and pass them into the upgrade API call as shown in the + example. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param application_upgrade_description: Parameters for an application + upgrade. + :type application_upgrade_description: + ~azure.servicefabric.models.ApplicationUpgradeDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_application_upgrade.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(application_upgrade_description, 'ApplicationUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} + + def get_application_upgrade( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets details for the latest upgrade performed on this application. 
+ + Returns information about the state of the latest application upgrade + along with details to aid debugging application health issues. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationUpgradeProgressInfo or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_upgrade.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationUpgradeProgressInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} + + def update_application_upgrade( + self, application_id, application_upgrade_update_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Updates an ongoing application upgrade in the Service Fabric cluster. + + Updates the parameters of an ongoing application upgrade from the ones + specified at the time of starting the application upgrade. 
This may be + required to mitigate stuck application upgrades due to incorrect + parameters or issues in the application to make progress. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param application_upgrade_update_description: Parameters for updating + an existing application upgrade. + :type application_upgrade_update_description: + ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.update_application_upgrade.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} + + def resume_application_upgrade( + self, application_id, upgrade_domain_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes upgrading an application in the Service Fabric cluster. + + Resumes an unmonitored manual Service Fabric application upgrade. + Service Fabric upgrades one upgrade domain at a time. 
For unmonitored + manual upgrades, after Service Fabric finishes an upgrade domain, it + waits for you to call this API before proceeding to the next upgrade + domain. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param upgrade_domain_name: The name of the upgrade domain in which to + resume the upgrade. + :type upgrade_domain_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + resume_application_upgrade_description = models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name) + + api_version = "6.0" + + # Construct URL + url = self.resume_application_upgrade.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} + + def rollback_application_upgrade( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Starts rolling back the currently on-going upgrade of an application in + the Service Fabric cluster. + + Starts rolling back the current application upgrade to the previous + version. 
This API can only be used to roll back the current in-progress + upgrade that is rolling forward to new version. If the application is + not currently being upgraded use StartApplicationUpgrade API to upgrade + it to desired version, including rolling back to a previous version. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.rollback_application_upgrade.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} + + def get_deployed_application_info_list( + self, node_name, timeout=60, include_health_state=False, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of applications deployed on a Service Fabric node. + + Gets the list of applications deployed on a Service Fabric node. The + results do not include information about deployed system applications + unless explicitly queried for by ID. Results encompass deployed + applications in active, activating, and downloading states. This query + requires that the node name corresponds to a node on the cluster. 
The + query fails if the provided node name does not point to any active + Service Fabric nodes on the cluster. + + :param node_name: The name of the node. + :type node_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param include_health_state: Include the health state of an entity. + If this parameter is false or not specified, then the health state + returned is "Unknown". + When set to true, the query goes in parallel to the node and the + health system service before the results are merged. + As a result, the query is more expensive and may take a longer time. + :type include_health_state: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. 
+ :type max_results: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedDeployedApplicationInfoList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.1" + + # Construct URL + url = self.get_deployed_application_info_list.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if include_health_state is not None: + query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code 
== 200: + deserialized = self._deserialize('PagedDeployedApplicationInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} + + def get_deployed_application_info( + self, node_name, application_id, timeout=60, include_health_state=False, custom_headers=None, raw=False, **operation_config): + """Gets the information about an application deployed on a Service Fabric + node. + + This query returns system application information if the application ID + provided is for system application. Results encompass deployed + applications in active, activating, and downloading states. This query + requires that the node name corresponds to a node on the cluster. The + query fails if the provided node name does not point to any active + Service Fabric nodes on the cluster. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param include_health_state: Include the health state of an entity. + If this parameter is false or not specified, then the health state + returned is "Unknown". 
+ When set to true, the query goes in parallel to the node and the + health system service before the results are merged. + As a result, the query is more expensive and may take a longer time. + :type include_health_state: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: DeployedApplicationInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.DeployedApplicationInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.1" + + # Construct URL + url = self.get_deployed_application_info.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if include_health_state is not None: + query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + 
deserialized = self._deserialize('DeployedApplicationInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} + + def get_deployed_application_health( + self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about health of an application deployed on a + Service Fabric node. + + Gets the information about health of an application deployed on a + Service Fabric node. Use EventsHealthStateFilter to optionally filter + for the collection of HealthEvent objects reported on the deployed + application based on health state. Use + DeployedServicePackagesHealthStateFilter to optionally filter for + DeployedServicePackageHealth children based on health state. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. 
The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param deployed_service_packages_health_state_filter: Allows filtering + of the deployed service package health state objects returned in the + result of deployed application health query based on their health + state. + The possible values for this parameter include integer value of one of + the following health states. + Only deployed service packages that match the filter are returned. All + deployed service packages are used to evaluate the aggregated health + state of the deployed application. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value can be a + combination of these values, obtained using the bitwise 'OR' operator. + For example, if the provided value is 6 then health state of service + packages with HealthState value of OK (2) and Warning (4) are + returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. 
+ - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type deployed_service_packages_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: DeployedApplicationHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.DeployedApplicationHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_application_health.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_service_packages_health_state_filter is not None: + query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized 
= self._deserialize('DeployedApplicationHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} + + def get_deployed_application_health_using_policy( + self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about health of an application deployed on a + Service Fabric node. using the specified policy. + + Gets the information about health of an application deployed on a + Service Fabric node using the specified policy. Use + EventsHealthStateFilter to optionally filter for the collection of + HealthEvent objects reported on the deployed application based on + health state. Use DeployedServicePackagesHealthStateFilter to + optionally filter for DeployedServicePackageHealth children based on + health state. Use ApplicationHealthPolicy to optionally override the + health policies used to evaluate the health. This API only uses + 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest + of the fields are ignored while evaluating the health of the deployed + application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. 
+ :type application_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param deployed_service_packages_health_state_filter: Allows filtering + of the deployed service package health state objects returned in the + result of deployed application health query based on their health + state. + The possible values for this parameter include integer value of one of + the following health states. + Only deployed service packages that match the filter are returned. All + deployed service packages are used to evaluate the aggregated health + state of the deployed application. + If not specified, all entries are returned. + The state values are flag-based enumeration, so the value can be a + combination of these values, obtained using the bitwise 'OR' operator. 
+ For example, if the provided value is 6 then health state of service + packages with HealthState value of OK (2) and Warning (4) are + returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type deployed_service_packages_health_state_filter: int + :param application_health_policy: Describes the health policies used + to evaluate the health of an application or one of its children. + If not present, the health evaluation uses the health policy from + application manifest or the default health policy. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: DeployedApplicationHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.DeployedApplicationHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_application_health_using_policy.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if deployed_service_packages_health_state_filter is not None: + query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, 
header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DeployedApplicationHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} + + def report_deployed_application_health( + self, node_name, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric application deployed on a + Service Fabric node. + + Reports health state of the application deployed on a Service Fabric + node. The report must contain the information about the source of the + health report and property on which it is reported. + The report is sent to a Service Fabric gateway Service, which forwards + to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, get deployed + application health and check that the report appears in the + HealthEvents section. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. 
+ For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. + If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. + If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. + This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_deployed_application_health.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} + + def 
get_application_manifest( + self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the manifest describing an application type. + + The response contains the application manifest XML as a string. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationTypeManifest or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ApplicationTypeManifest or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_manifest.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 
'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationTypeManifest', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} + + def get_service_info_list( + self, application_id, service_type_name=None, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about all services belonging to the application + specified by the application ID. + + Returns the information about all services belonging to the application + specified by the application ID. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_type_name: The service type name used to filter the + services to query for. + :type service_type_name: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. 
When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedServiceInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedServiceInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_info_list.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if service_type_name is not None: + query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + 
header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedServiceInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} + + def get_service_info( + self, application_id, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about the specific service belonging to the + Service Fabric application. + + Returns the information about the specified service belonging to the + specified Service Fabric application. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ServiceInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_info.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_info.metadata = 
{'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} + + def get_application_name_info( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the name of the Service Fabric application for a service. + + Gets the name of the application for the specified service. A 404 + FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the + provided service ID does not exist. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ApplicationNameInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ApplicationNameInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_application_name_info.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ApplicationNameInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_name_info.metadata = {'url': '/Services/{serviceId}/$/GetApplicationName'} + + def create_service( + self, application_id, service_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Creates the specified Service Fabric service. + + This api allows creating a new Service Fabric stateless or stateful + service under a specified Service Fabric application. 
The description + for creating the service includes partitioning information and optional + properties for placement and load balancing. Some of the properties can + later be modified using `UpdateService` API. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_description: The information necessary to create a + service. + :type service_description: + ~azure.servicefabric.models.ServiceDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.create_service.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(service_description, 'ServiceDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} + + def create_service_from_template( + self, application_id, service_from_template_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Creates a Service Fabric service from the service template. + + Creates a Service Fabric service from the service template defined in + the application manifest. A service template contains the properties + that will be same for the service instance of the same type. 
The API + allows overriding the properties that are usually different for + different services of the same service type. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_from_template_description: Describes the service that + needs to be created from the template defined in the application + manifest. + :type service_from_template_description: + ~azure.servicefabric.models.ServiceFromTemplateDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.create_service_from_template.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} + + def delete_service( + self, service_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes an existing Service Fabric service. + + A service must be created before it can be deleted. By default, Service + Fabric will try to close service replicas in a graceful manner and then + delete the service. 
However, if the service is having issues closing + the replica gracefully, the delete operation may take a long time or + get stuck. Use the optional ForceRemove flag to skip the graceful close + sequence and forcefully delete the service. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param force_remove: Remove a Service Fabric application or service + forcefully without going through the graceful shutdown sequence. This + parameter can be used to forcefully delete an application or service + for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_service.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} + + def update_service( + self, service_id, service_update_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Updates a Service Fabric service using the specified update + description. + + This API allows updating properties of a running Service Fabric + service. The set of properties that can be updated are a subset of the + properties that were specified at the time of creating the service. The + current set of properties can be obtained using `GetServiceDescription` + API. 
Note that updating the properties of a running service is + different than upgrading your application using + `StartApplicationUpgrade` API. The upgrade is a long running background + operation that involves moving the application from one version to + another, one upgrade domain at a time, whereas update applies the new + properties immediately to the service. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_update_description: The information necessary to update + a service. + :type service_update_description: + ~azure.servicefabric.models.ServiceUpdateDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.update_service.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} + + def get_service_description( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the description of an existing Service Fabric service. + + Gets the description of an existing Service Fabric service. A service + must be created before its description can be obtained. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ServiceDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_description.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) 
+ + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceDescription', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_description.metadata = {'url': '/Services/{serviceId}/$/GetDescription'} + + def get_service_health( + self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of the specified Service Fabric service. + + Gets the health information of the specified service. + Use EventsHealthStateFilter to filter the collection of health events + reported on the service based on the health state. + Use PartitionsHealthStateFilter to filter the collection of partitions + returned. + If you specify a service that does not exist in the health store, this + request returns an error. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. 
For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param partitions_health_state_filter: Allows filtering of the + partitions health state objects returned in the result of service + health query based on their health state. + The possible values for this parameter include integer value of one of + the following health states. + Only partitions that match the filter are returned. All partitions are + used to evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + value + obtained using bitwise 'OR' operator. For example, if the provided + value is 6 then health state of partitions with HealthState value of + OK (2) and Warning (4) will be returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. 
+ - All - Filter that matches input with any HealthState value. The + value is 65535. + :type partitions_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ServiceHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_health.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if partitions_health_state_filter is not None: + query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = 
self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} + + def get_service_health_using_policy( + self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of the specified Service Fabric service, by using the + specified health policy. + + Gets the health information of the specified service. + If the application health policy is specified, the health evaluation + uses it to get the aggregated health state. + If the policy is not specified, the health evaluation uses the + application health policy defined in the application manifest, or the + default health policy, if no policy is defined in the manifest. + Use EventsHealthStateFilter to filter the collection of health events + reported on the service based on the health state. + Use PartitionsHealthStateFilter to filter the collection of partitions + returned. 
+ If you specify a service that does not exist in the health store, this + request returns an error. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param partitions_health_state_filter: Allows filtering of the + partitions health state objects returned in the result of service + health query based on their health state. 
+ The possible values for this parameter include integer value of one of + the following health states. + Only partitions that match the filter are returned. All partitions are + used to evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + value + obtained using bitwise 'OR' operator. For example, if the provided + value is 6 then health state of partitions with HealthState value of + OK (2) and Warning (4) will be returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type partitions_health_state_filter: int + :param application_health_policy: Describes the health policies used + to evaluate the health of an application or one of its children. + If not present, the health evaluation uses the health policy from + application manifest or the default health policy. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ServiceHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_health_using_policy.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if partitions_health_state_filter is not None: + query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 
'ApplicationHealthPolicy') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} + + def report_service_health( + self, service_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric service. + + Reports health state of the specified Service Fabric service. The + report must contain the information about the source of the health + report and property on which it is reported. + The report is sent to a Service Fabric gateway Service, which forwards + to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetServiceHealth and check that the report appears in the HealthEvents + section. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. 
+ :type service_id: str + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. + If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. + If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. + This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_service_health.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'} + + def resolve_service( + self, service_id, partition_key_type=None, partition_key_value=None, previous_rsp_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resolve a Service Fabric partition. + + Resolve a Service Fabric service partition to get the endpoints of the + service replicas. + + :param service_id: The identity of the service. 
This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_key_type: Key type for the partition. This parameter + is required if the partition scheme for the service is Int64Range or + Named. The possible values are following. + - None (1) - Indicates that the PartitionKeyValue parameter is not + specified. This is valid for the partitions with partitioning scheme + as Singleton. This is the default value. The value is 1. + - Int64Range (2) - Indicates that the PartitionKeyValue parameter is + an int64 partition key. This is valid for the partitions with + partitioning scheme as Int64Range. The value is 2. + - Named (3) - Indicates that the PartitionKeyValue parameter is a name + of the partition. This is valid for the partitions with partitioning + scheme as Named. The value is 3. + :type partition_key_type: int + :param partition_key_value: Partition key. This is required if the + partition scheme for the service is Int64Range or Named. + This is not the partition ID, but rather, either the integer key + value, or the name of the partition ID. + For example, if your service is using ranged partitions from 0 to 10, + then they PartitionKeyValue would be an + integer in that range. Query service description to see the range or + name. + :type partition_key_value: str + :param previous_rsp_version: The value in the Version field of the + response that was received previously. This is required if the user + knows that the result that was gotten previously is stale. + :type previous_rsp_version: str + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ResolvedServicePartition or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ResolvedServicePartition or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.resolve_service.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_key_type is not None: + query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int') + if partition_key_value is not None: + query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True) + if previous_rsp_version is not None: + query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, 
stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ResolvedServicePartition', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'} + + def get_unplaced_replica_information( + self, service_id, partition_id=None, only_query_primaries=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about unplaced replica of the service. + + Returns the information about the unplaced replicas of the service. + If PartitionId is specified, then result will contain information only + about unplaced replicas for that partition. + If PartitionId is not specified, then result will contain information + about unplaced replicas for all partitions of that service. + If OnlyQueryPrimaries is set to true, then result will contain + information only about primary replicas, and will ignore unplaced + secondary replicas. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param only_query_primaries: Indicates that unplaced replica + information will be queries only for primary replicas. + :type only_query_primaries: bool + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: UnplacedReplicaInformation or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.UnplacedReplicaInformation or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_unplaced_replica_information.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_id is not None: + query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') + if only_query_primaries is not None: + query_parameters['OnlyQueryPrimaries'] = self._serialize.query("only_query_primaries", only_query_primaries, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 
200: + deserialized = self._deserialize('UnplacedReplicaInformation', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_unplaced_replica_information.metadata = {'url': '/Services/{serviceId}/$/GetUnplacedReplicaInformation'} + + def get_loaded_partition_info_list( + self, metric_name, service_name=None, ordering="Desc", max_results=0, continuation_token=None, custom_headers=None, raw=False, **operation_config): + """Gets ordered list of partitions. + + Retrieves partitions which are most/least loaded according to specified + metric. + + :param metric_name: Name of the metric based on which to get ordered + list of partitions. + :type metric_name: str + :param service_name: The name of a service. + :type service_name: str + :param ordering: Ordering of partitions' load. Possible values + include: 'Desc', 'Asc' + :type ordering: str or ~azure.servicefabric.models.Ordering + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. 
+ :type continuation_token: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: LoadedPartitionInformationResultList or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.LoadedPartitionInformationResultList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "8.0" + + # Construct URL + url = self.get_loaded_partition_info_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['MetricName'] = self._serialize.query("metric_name", metric_name, 'str') + if service_name is not None: + query_parameters['ServiceName'] = self._serialize.query("service_name", service_name, 'str') + if ordering is not None: + query_parameters['Ordering'] = self._serialize.query("ordering", ordering, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('LoadedPartitionInformationResultList', response) + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_loaded_partition_info_list.metadata = {'url': '/$/GetLoadedPartitionInfoList'} + + def get_partition_info_list( + self, service_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of partitions of a Service Fabric service. + + The response includes the partition ID, partitioning scheme + information, keys supported by the partition, status, health, and other + details about the partition. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedServicePartitionInfoList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_info_list.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedServicePartitionInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_info_list.metadata = {'url': 
'/Services/{serviceId}/$/GetPartitions'} + + def get_partition_info( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about a Service Fabric partition. + + Gets the information about the specified partition. The response + includes the partition ID, partitioning scheme information, keys + supported by the partition, status, health, and other details about the + partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ServicePartitionInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServicePartitionInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_partition_info.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServicePartitionInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_info.metadata = {'url': '/Partitions/{partitionId}'} + + def get_service_name_info( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the name of the Service Fabric service for a partition. + + Gets name of the service for the specified partition. A 404 error is + returned if the partition ID does not exist in the cluster. + + :param partition_id: The identity of the partition. 
+ :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ServiceNameInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceNameInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_name_info.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ServiceNameInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + 
get_service_name_info.metadata = {'url': '/Partitions/{partitionId}/$/GetServiceName'} + + def get_partition_health( + self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of the specified Service Fabric partition. + + Use EventsHealthStateFilter to filter the collection of health events + reported on the service based on the health state. + Use ReplicasHealthStateFilter to filter the collection of + ReplicaHealthState objects on the partition. + If you specify a partition that does not exist in the health store, + this request returns an error. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. 
The + value is 65535. + :type events_health_state_filter: int + :param replicas_health_state_filter: Allows filtering the collection + of ReplicaHealthState objects on the partition. The value can be + obtained from members or bitwise operations on members of + HealthStateFilter. Only replicas that match the filter will be + returned. All replicas will be used to evaluate the aggregated health + state. If not specified, all entries will be returned.The state values + are flag-based enumeration, so the value could be a combination of + these values obtained using bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) will be returned. The possible values for this + parameter include integer value of one of the following health states. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type replicas_health_state_filter: int + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PartitionHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PartitionHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_partition_health.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if replicas_health_state_filter is not None: + query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, 
response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PartitionHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} + + def get_partition_health_using_policy( + self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of the specified Service Fabric partition, by using the + specified health policy. + + Gets the health information of the specified partition. + If the application health policy is specified, the health evaluation + uses it to get the aggregated health state. + If the policy is not specified, the health evaluation uses the + application health policy defined in the application manifest, or the + default health policy, if no policy is defined in the manifest. + Use EventsHealthStateFilter to filter the collection of health events + reported on the partition based on the health state. + Use ReplicasHealthStateFilter to filter the collection of + ReplicaHealthState objects on the partition. Use + ApplicationHealthPolicy in the POST body to override the health + policies used to evaluate the health. + If you specify a partition that does not exist in the health store, + this request returns an error. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. 
+ If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param replicas_health_state_filter: Allows filtering the collection + of ReplicaHealthState objects on the partition. The value can be + obtained from members or bitwise operations on members of + HealthStateFilter. Only replicas that match the filter will be + returned. All replicas will be used to evaluate the aggregated health + state. If not specified, all entries will be returned.The state values + are flag-based enumeration, so the value could be a combination of + these values obtained using bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) will be returned. The possible values for this + parameter include integer value of one of the following health states. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. 
+ - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type replicas_health_state_filter: int + :param application_health_policy: Describes the health policies used + to evaluate the health of an application or one of its children. + If not present, the health evaluation uses the health policy from + application manifest or the default health policy. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param exclude_health_statistics: Indicates whether the health + statistics should be returned as part of the query result. False by + default. + The statistics show the number of children entities in health state + Ok, Warning, and Error. + :type exclude_health_statistics: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PartitionHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PartitionHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_partition_health_using_policy.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if replicas_health_state_filter is not None: + query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int') + if exclude_health_statistics is not None: + query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PartitionHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} + + def report_partition_health( + self, partition_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric partition. + + Reports health state of the specified Service Fabric partition. The + report must contain the information about the source of the health + report and property on which it is reported. + The report is sent to a Service Fabric gateway Partition, which + forwards to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetPartitionHealth and check that the report appears in the + HealthEvents section. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. 
+ If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. + If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. + This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_partition_health.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'} + + def get_partition_load_information( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the load information of the specified Service Fabric partition. + + Returns information about the load of a specified partition. + The response includes a list of load reports for a Service Fabric + partition. 
+ Each report includes the load metric name, value, and last reported + time in UTC. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PartitionLoadInformation or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PartitionLoadInformation or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_partition_load_information.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('PartitionLoadInformation', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} + + def reset_partition_load( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resets the current load of a Service Fabric partition. + + Resets the current load of a Service Fabric partition to the default + load for the service. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.reset_partition_load.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + reset_partition_load.metadata = {'url': '/Partitions/{partitionId}/$/ResetLoad'} + + def recover_partition( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Indicates to the Service Fabric cluster that it should attempt to + recover a specific partition that is currently stuck in quorum loss. + + This operation should only be performed if it is known that the + replicas that are down cannot be recovered. Incorrect use of this API + can cause potential data loss. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.recover_partition.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'} + + def recover_service_partitions( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Indicates to the Service Fabric cluster that it should attempt to + recover the specified service that is currently stuck in quorum loss. 
+ + Indicates to the Service Fabric cluster that it should attempt to + recover the specified service that is currently stuck in quorum loss. + This operation should only be performed if it is known that the + replicas that are down cannot be recovered. Incorrect use of this API + can cause potential data loss. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.recover_service_partitions.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} + + def recover_system_partitions( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Indicates to the Service Fabric cluster that it should attempt to + recover the system services that are currently stuck in quorum loss. + + Indicates to the Service Fabric cluster that it should attempt to + recover the system services that are currently stuck in quorum loss. + This operation should only be performed if it is known that the + replicas that are down cannot be recovered. Incorrect use of this API + can cause potential data loss. + + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.recover_system_partitions.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} + + def recover_all_partitions( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Indicates to the Service Fabric cluster that it should attempt to + recover any services (including system services) which are currently + stuck in quorum loss. + + This operation should only be performed if it is known that the + replicas that are down cannot be recovered. Incorrect use of this API + can cause potential data loss. 
+ + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.recover_all_partitions.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} + + def move_primary_replica( + self, partition_id, node_name=None, ignore_constraints=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Moves the primary replica of a partition of a stateful service. + + This command moves the primary replica of a partition of a stateful + service, respecting all constraints. 
+ If NodeName parameter is specified, primary will be moved to the + specified node (if constraints allow it). + If NodeName parameter is not specified, primary replica will be moved + to a random node in the cluster. + If IgnoreConstraints parameter is specified and set to true, then + primary will be moved regardless of the constraints. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param node_name: The name of the node. + :type node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or + instance. If this parameter is not specified, all constraints are + honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.5" + + # Construct URL + url = self.move_primary_replica.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if node_name is not None: + query_parameters['NodeName'] = self._serialize.query("node_name", node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + move_primary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MovePrimaryReplica'} + + def move_secondary_replica( + self, partition_id, current_node_name, new_node_name=None, ignore_constraints=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Moves the secondary replica of a partition of a stateful service. + + This command moves the secondary replica of a partition of a stateful + service, respecting all constraints. 
+ CurrentNodeName parameter must be specified to identify the replica + that is moved. + Source node name must be specified, but new node name can be omitted, + and in that case replica is moved to a random node. + If IgnoreConstraints parameter is specified and set to true, then + secondary will be moved regardless of the constraints. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param current_node_name: The name of the source node for secondary + replica move. + :type current_node_name: str + :param new_node_name: The name of the target node for secondary + replica or instance move. If not specified, replica or instance is + moved to a random node. + :type new_node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or + instance. If this parameter is not specified, all constraints are + honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.5" + + # Construct URL + url = self.move_secondary_replica.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') + if new_node_name is not None: + query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + move_secondary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MoveSecondaryReplica'} + + def update_partition_load( + self, partition_metric_load_description_list, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Update the loads of provided partitions for specific metrics. 
+ + Updates the load value and predicted load value for all the partitions + provided for specified metrics. + + :param partition_metric_load_description_list: Description of updating + load for list of partitions. + :type partition_metric_load_description_list: + list[~azure.servicefabric.models.PartitionMetricLoadDescription] + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedUpdatePartitionLoadResultList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedUpdatePartitionLoadResultList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "7.2" + + # Construct URL + url = self.update_partition_load.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(partition_metric_load_description_list, '[PartitionMetricLoadDescription]') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedUpdatePartitionLoadResultList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + update_partition_load.metadata = {'url': '/$/UpdatePartitionLoad'} + + def move_instance( + self, service_id, partition_id, 
current_node_name=None, new_node_name=None, ignore_constraints=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Moves the instance of a partition of a stateless service. + + This command moves the instance of a partition of a stateless service, + respecting all constraints. + Partition id and service name must be specified to be able to move the + instance. + CurrentNodeName when specified identifies the instance that is moved. + If not specified, random instance will be moved + New node name can be omitted, and in that case instance is moved to a + random node. + If IgnoreConstraints parameter is specified and set to true, then + instance will be moved regardless of the constraints. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param current_node_name: The name of the source node for instance + move. If not specified, instance is moved from a random node. + :type current_node_name: str + :param new_node_name: The name of the target node for secondary + replica or instance move. If not specified, replica or instance is + moved to a random node. + :type new_node_name: str + :param ignore_constraints: Ignore constraints when moving a replica or + instance. If this parameter is not specified, all constraints are + honored. + :type ignore_constraints: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "8.0" + + # Construct URL + url = self.move_instance.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if current_node_name is not None: + query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') + if new_node_name is not None: + query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') + if ignore_constraints is not None: + query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return 
client_raw_response + move_instance.metadata = {'url': '/Services/{serviceId}/$/GetPartitions/{partitionId}/$/MoveInstance'} + + def create_repair_task( + self, repair_task, custom_headers=None, raw=False, **operation_config): + """Creates a new repair task. + + For clusters that have the Repair Manager Service configured, + this API provides a way to create repair tasks that run automatically + or manually. + For repair tasks that run automatically, an appropriate repair executor + must be running for each repair action to run automatically. + These are currently only available in specially-configured Azure Cloud + Services. + To create a manual repair task, provide the set of impacted node names + and the + expected impact. When the state of the created repair task changes to + approved, + you can safely perform repair actions on those nodes. + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param repair_task: Describes the repair task to be created or + updated. + :type repair_task: ~azure.servicefabric.models.RepairTask + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.create_repair_task.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(repair_task, 'RepairTask') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('RepairTaskUpdateInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + create_repair_task.metadata = {'url': '/$/CreateRepairTask'} + + def cancel_repair_task( + self, repair_task_cancel_description, custom_headers=None, raw=False, **operation_config): + """Requests the cancellation of the given repair task. + + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param repair_task_cancel_description: Describes the repair task to be + cancelled. 
+ :type repair_task_cancel_description: + ~azure.servicefabric.models.RepairTaskCancelDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.cancel_repair_task.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('RepairTaskUpdateInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'} + + def delete_repair_task( + self, task_id, version=None, custom_headers=None, raw=False, **operation_config): + """Deletes a completed repair task. 
+ + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param task_id: The ID of the completed repair task to be deleted. + :type task_id: str + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version + check is performed. + :type version: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + repair_task_delete_description = models.RepairTaskDeleteDescription(task_id=task_id, version=version) + + api_version = "6.0" + + # Construct URL + url = self.delete_repair_task.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(repair_task_delete_description, 'RepairTaskDeleteDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'} + + def get_repair_task_list( + self, task_id_filter=None, 
state_filter=None, executor_filter=None, custom_headers=None, raw=False, **operation_config): + """Gets a list of repair tasks matching the given filters. + + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param task_id_filter: The repair task ID prefix to be matched. + :type task_id_filter: str + :param state_filter: A bitwise-OR of the following values, specifying + which task states should be included in the result list. + - 1 - Created + - 2 - Claimed + - 4 - Preparing + - 8 - Approved + - 16 - Executing + - 32 - Restoring + - 64 - Completed + :type state_filter: int + :param executor_filter: The name of the repair executor whose claimed + tasks should be included in the list. + :type executor_filter: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.RepairTask] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_repair_task_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if task_id_filter is not None: + query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str') + if state_filter is not None: + query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') + if executor_filter is not None: + query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[RepairTask]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'} + + def force_approve_repair_task( + self, task_id, version=None, custom_headers=None, raw=False, **operation_config): + """Forces the approval of the given repair task. + + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param task_id: The ID of the repair task. + :type task_id: str + :param version: The current version number of the repair task. 
If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version + check is performed. + :type version: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + repair_task_approve_description = models.RepairTaskApproveDescription(task_id=task_id, version=version) + + api_version = "6.0" + + # Construct URL + url = self.force_approve_repair_task.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(repair_task_approve_description, 'RepairTaskApproveDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('RepairTaskUpdateInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'} + + def 
update_repair_task_health_policy( + self, repair_task_update_health_policy_description, custom_headers=None, raw=False, **operation_config): + """Updates the health policy of the given repair task. + + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param repair_task_update_health_policy_description: Describes the + repair task healthy policy to be updated. + :type repair_task_update_health_policy_description: + ~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.update_repair_task_health_policy.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('RepairTaskUpdateInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'} + + def update_repair_execution_state( + self, repair_task, custom_headers=None, raw=False, **operation_config): + """Updates the execution state of a repair task. + + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param repair_task: Describes the repair task to be created or + updated. + :type repair_task: ~azure.servicefabric.models.RepairTask + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: RepairTaskUpdateInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.update_repair_execution_state.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(repair_task, 'RepairTask') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + 
deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('RepairTaskUpdateInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'} + + def get_replica_info_list( + self, partition_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about replicas of a Service Fabric service + partition. + + The GetReplicas endpoint returns information about the replicas of the + specified partition. The response includes the ID, role, status, + health, node name, uptime, and other details about the replica. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedReplicaInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedReplicaInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_replica_info_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedReplicaInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'} + + def get_replica_info( + self, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about a replica of a Service Fabric partition. 
+ + The response includes the ID, role, status, health, node name, uptime, + and other details about the replica. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ReplicaInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ReplicaInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_replica_info.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 
204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ReplicaInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'} + + def get_replica_health( + self, partition_id, replica_id, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric stateful service replica or + stateless service instance. + + Gets the health of a Service Fabric replica. + Use EventsHealthStateFilter to filter the collection of health events + reported on the replica based on the health state. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. + If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. 
+ - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ReplicaHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ReplicaHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_replica_health.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # 
Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ReplicaHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} + + def get_replica_health_using_policy( + self, partition_id, replica_id, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the health of a Service Fabric stateful service replica or + stateless service instance using the specified policy. + + Gets the health of a Service Fabric stateful service replica or + stateless service instance. + Use EventsHealthStateFilter to filter the collection of health events + reported on the cluster based on the health state. + Use ApplicationHealthPolicy to optionally override the health policies + used to evaluate the health. This API only uses + 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest + of the fields are ignored while evaluating the health of the replica. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param events_health_state_filter: Allows filtering the collection of + HealthEvent objects returned based on health state. + The possible values for this parameter include integer value of one of + the following health states. + Only events that match the filter are returned. All events are used to + evaluate the aggregated health state. 
+ If not specified, all entries are returned. The state values are + flag-based enumeration, so the value could be a combination of these + values, obtained using the bitwise 'OR' operator. For example, If the + provided value is 6 then all of the events with HealthState value of + OK (2) and Warning (4) are returned. + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in + order to return no results on a given collection of states. The value + is 1. + - Ok - Filter that matches input with HealthState value Ok. The value + is 2. + - Warning - Filter that matches input with HealthState value Warning. + The value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The + value is 65535. + :type events_health_state_filter: int + :param application_health_policy: Describes the health policies used + to evaluate the health of an application or one of its children. + If not present, the health evaluation uses the health policy from + application manifest or the default health policy. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ReplicaHealth or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ReplicaHealth or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_replica_health_using_policy.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if events_health_state_filter is not None: + query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if application_health_policy is not None: + body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ReplicaHealth', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return 
deserialized + get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} + + def report_replica_health( + self, partition_id, replica_id, health_information, service_kind="Stateful", immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Sends a health report on the Service Fabric replica. + + Reports health state of the specified Service Fabric replica. The + report must contain the information about the source of the health + report and property on which it is reported. + The report is sent to a Service Fabric gateway Replica, which forwards + to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetReplicaHealth and check that the report appears in the HealthEvents + section. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param service_kind: The kind of service replica (Stateless or + Stateful) for which the health is being reported. Following are the + possible values. Possible values include: 'Stateless', 'Stateful' + :type service_kind: str or + ~azure.servicefabric.models.ReplicaHealthReportServiceKind + :param health_information: Describes the health information for the + health report. This information needs to be present in all of the + health reports sent to the health manager. + :type health_information: + ~azure.servicefabric.models.HealthInformation + :param immediate: A flag that indicates whether the report should be + sent immediately. + A health report is sent to a Service Fabric gateway Application, which + forwards to the health store. 
+ If Immediate is set to true, the report is sent immediately from HTTP + Gateway to the health store, regardless of the fabric client settings + that the HTTP Gateway Application is using. + This is useful for critical reports that should be sent as soon as + possible. + Depending on timing and other conditions, sending the report may still + fail, for example if the HTTP Gateway is closed or the message doesn't + reach the Gateway. + If Immediate is set to false, the report is sent based on the health + client settings from the HTTP Gateway. Therefore, it will be batched + according to the HealthReportSendInterval configuration. + This is the recommended setting because it allows the health client to + optimize health reporting messages to health store as well as health + report processing. + By default, reports are not sent immediately. + :type immediate: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.report_replica_health.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') + if immediate is not None: + query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} + + def get_deployed_service_replica_info_list( + self, node_name, application_id, partition_id=None, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Gets the list of replicas deployed on a Service Fabric node. + + Gets the list containing the information about replicas deployed on a + Service Fabric node. The information include partition ID, replica ID, + status of the replica, name of the service, name of the service type, + and other information. Use PartitionId or ServiceManifestName query + parameters to return information about the deployed replicas matching + the specified values for those parameters. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param service_manifest_name: The name of a service manifest + registered as part of an application type in a Service Fabric cluster. + :type service_manifest_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo] + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_service_replica_info_list.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if partition_id is not None: + query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServiceReplicaInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} + + 
def get_deployed_service_replica_detail_info( + self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the details of replica deployed on a Service Fabric node. + + Gets the details of the replica deployed on a Service Fabric node. The + information includes service kind, service name, current service + operation, current service operation start date time, partition ID, + replica/instance ID, reported load, and other information. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: DeployedServiceReplicaDetailInfo or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_service_replica_detail_info.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} + + def get_deployed_service_replica_detail_info_by_partition_id( + self, node_name, partition_id, timeout=60, custom_headers=None, raw=False, 
    def get_deployed_service_replica_detail_info_by_partition_id(
            self, node_name, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the details of replica deployed on a Service Fabric node.

        Gets the details of the replica deployed on a Service Fabric node. The
        information includes service kind, service name, current service
        operation, current service operation start date time, partition ID,
        replica/instance ID, reported load, and other information. Unlike
        ``get_deployed_service_replica_detail_info``, this variant addresses
        the replica by node and partition only (no replica ID in the URL).

        :param node_name: The name of the node.
        :type node_name: str
        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: DeployedServiceReplicaDetailInfo or ClientRawResponse if
         raw=true
        :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.0"

        # Construct URL
        url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url']
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request; only 200 is a success for this endpoint.
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'}
+ + Restarts a service replica of a persisted service running on a node. + Warning - There are no safety checks performed when this API is used. + Incorrect use of this API can lead to availability loss for stateful + services. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.restart_replica.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, 
query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + restart_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} + + def remove_replica( + self, node_name, partition_id, replica_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Removes a service replica running on a node. + + This API simulates a Service Fabric replica failure by removing a + replica from a Service Fabric cluster. The removal closes the replica, + transitions the replica to the role None, and then removes all of the + state information of the replica from the cluster. This API tests the + replica state removal path, and simulates the report fault permanent + path through client APIs. Warning - There are no safety checks + performed when this API is used. Incorrect use of this API can lead to + data loss for stateful services. In addition, the forceRemove flag + impacts all other replicas hosted in the same process. + + :param node_name: The name of the node. + :type node_name: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param force_remove: Remove a Service Fabric application or service + forcefully without going through the graceful shutdown sequence. This + parameter can be used to forcefully delete an application or service + for which delete is timing out due to issues in the service code that + prevents graceful close of replicas. + :type force_remove: bool + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.remove_replica.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if force_remove is not None: + query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + remove_replica.metadata = {'url': 
'/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} + + def get_deployed_service_package_info_list( + self, node_name, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of service packages deployed on a Service Fabric node. + + Returns the information about the service packages deployed on a + Service Fabric node for the given application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_service_package_info_list.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedServicePackageInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} + + def get_deployed_service_package_info_list_by_name( + self, node_name, application_id, service_package_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of service packages deployed on a Service Fabric node + matching exactly the specified name. 
    def get_deployed_service_package_info_list_by_name(
            self, node_name, application_id, service_package_name, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the list of service packages deployed on a Service Fabric node
        matching exactly the specified name.

        Returns the information about the service packages deployed on a
        Service Fabric node for the given application. These results are of
        service packages whose name match exactly the service package name
        specified as the parameter.

        :param node_name: The name of the node.
        :type node_name: str
        :param application_id: The identity of the application. This is
         typically the full name of the application without the 'fabric:' URI
         scheme. Starting from version 6.0, hierarchical names are delimited
         with the "~" character. For example, if the application name is
         "fabric:/myapp/app1", the application identity would be "myapp~app1"
         in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param service_package_name: The name of the service package.
        :type service_package_name: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo]
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.0"

        # Construct URL
        url = self.get_deployed_service_package_info_list_by_name.metadata['url']
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
            'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request. 204 is accepted and yields None
        # (presumably no service package with that exact name — confirm
        # against the Service Fabric REST API reference).
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 204]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[DeployedServicePackageInfo]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'}
    def get_deployed_service_package_health(
            self, node_name, application_id, service_package_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the information about health of a service package for a specific
        application deployed for a Service Fabric node and application.

        Gets the information about health of a service package for a specific
        application deployed on a Service Fabric node. Use
        EventsHealthStateFilter to optionally filter for the collection of
        HealthEvent objects reported on the deployed service package based on
        health state.

        :param node_name: The name of the node.
        :type node_name: str
        :param application_id: The identity of the application. This is
         typically the full name of the application without the 'fabric:' URI
         scheme. Starting from version 6.0, hierarchical names are delimited
         with the "~" character. For example, if the application name is
         "fabric:/myapp/app1", the application identity would be "myapp~app1"
         in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param service_package_name: The name of the service package.
        :type service_package_name: str
        :param events_health_state_filter: Allows filtering the collection of
         HealthEvent objects returned based on health state.
         The possible values for this parameter include integer value of one
         of the following health states.
         Only events that match the filter are returned. All events are used
         to evaluate the aggregated health state.
         If not specified, all entries are returned. The state values are
         flag-based enumeration, so the value could be a combination of these
         values, obtained using the bitwise 'OR' operator. For example, If the
         provided value is 6 then all of the events with HealthState value of
         OK (2) and Warning (4) are returned.
         - Default - Default value. Matches any HealthState. The value is
         zero.
         - None - Filter that doesn't match any HealthState value. Used in
         order to return no results on a given collection of states. The value
         is 1.
         - Ok - Filter that matches input with HealthState value Ok. The value
         is 2.
         - Warning - Filter that matches input with HealthState value Warning.
         The value is 4.
         - Error - Filter that matches input with HealthState value Error. The
         value is 8.
         - All - Filter that matches input with any HealthState value. The
         value is 65535.
        :type events_health_state_filter: int
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: DeployedServicePackageHealth or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.0"

        # Construct URL
        url = self.get_deployed_service_package_health.metadata['url']
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
            'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters. Note: the default filter (0) is still sent,
        # since only an explicit None suppresses the query parameter.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DeployedServicePackageHealth', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'}
    def get_deployed_service_package_health_using_policy(
            self, node_name, application_id, service_package_name, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Gets the information about health of service package for a specific
        application deployed on a Service Fabric node using the specified
        policy.

        Gets the information about health of a service package for a specific
        application deployed on a Service Fabric node using the specified
        policy. Use EventsHealthStateFilter to optionally filter for the
        collection of HealthEvent objects reported on the deployed service
        package based on health state. Use ApplicationHealthPolicy to
        optionally override the health policies used to evaluate the health.
        This API only uses 'ConsiderWarningAsError' field of the
        ApplicationHealthPolicy. The rest of the fields are ignored while
        evaluating the health of the deployed service package.

        :param node_name: The name of the node.
        :type node_name: str
        :param application_id: The identity of the application. This is
         typically the full name of the application without the 'fabric:' URI
         scheme. Starting from version 6.0, hierarchical names are delimited
         with the "~" character. For example, if the application name is
         "fabric:/myapp/app1", the application identity would be "myapp~app1"
         in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param service_package_name: The name of the service package.
        :type service_package_name: str
        :param events_health_state_filter: Allows filtering the collection of
         HealthEvent objects returned based on health state.
         The possible values for this parameter include integer value of one
         of the following health states.
         Only events that match the filter are returned. All events are used
         to evaluate the aggregated health state.
         If not specified, all entries are returned. The state values are
         flag-based enumeration, so the value could be a combination of these
         values, obtained using the bitwise 'OR' operator. For example, If the
         provided value is 6 then all of the events with HealthState value of
         OK (2) and Warning (4) are returned.
         - Default - Default value. Matches any HealthState. The value is
         zero.
         - None - Filter that doesn't match any HealthState value. Used in
         order to return no results on a given collection of states. The value
         is 1.
         - Ok - Filter that matches input with HealthState value Ok. The value
         is 2.
         - Warning - Filter that matches input with HealthState value Warning.
         The value is 4.
         - Error - Filter that matches input with HealthState value Error. The
         value is 8.
         - All - Filter that matches input with any HealthState value. The
         value is 65535.
        :type events_health_state_filter: int
        :param application_health_policy: Describes the health policies used
         to evaluate the health of an application or one of its children.
         If not present, the health evaluation uses the health policy from
         application manifest or the default health policy.
        :type application_health_policy:
         ~azure.servicefabric.models.ApplicationHealthPolicy
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: DeployedServicePackageHealth or ClientRawResponse if raw=true
        :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.0"

        # Construct URL
        url = self.get_deployed_service_package_health_using_policy.metadata['url']
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
            'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body: the policy override is optional, so an empty body
        # is sent when the caller passed None.
        if application_health_policy is not None:
            body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
        else:
            body_content = None

        # Construct and send request (POST because a body may be carried,
        # even though the operation is a read).
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DeployedServicePackageHealth', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'}
    def report_deployed_service_package_health(
            self, node_name, application_id, service_package_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config):
        """Sends a health report on the Service Fabric deployed service package.

        Reports health state of the service package of the application
        deployed on a Service Fabric node. The report must contain the
        information about the source of the health report and property on
        which it is reported. The report is sent to a Service Fabric gateway
        Service, which forwards to the health store.
        The report may be accepted by the gateway, but rejected by the health
        store after extra validation.
        For example, the health store may reject the report because of an
        invalid parameter, like a stale sequence number.
        To see whether the report was applied in the health store, get
        deployed service package health and check that the report appears in
        the HealthEvents section.

        :param node_name: The name of the node.
        :type node_name: str
        :param application_id: The identity of the application. This is
         typically the full name of the application without the 'fabric:' URI
         scheme. Starting from version 6.0, hierarchical names are delimited
         with the "~" character. For example, if the application name is
         "fabric:/myapp/app1", the application identity would be "myapp~app1"
         in 6.0+ and "myapp/app1" in previous versions.
        :type application_id: str
        :param service_package_name: The name of the service package.
        :type service_package_name: str
        :param health_information: Describes the health information for the
         health report. This information needs to be present in all of the
         health reports sent to the health manager.
        :type health_information:
         ~azure.servicefabric.models.HealthInformation
        :param immediate: A flag that indicates whether the report should be
         sent immediately.
         A health report is sent to a Service Fabric gateway Application,
         which forwards to the health store.
         If Immediate is set to true, the report is sent immediately from HTTP
         Gateway to the health store, regardless of the fabric client settings
         that the HTTP Gateway Application is using.
         This is useful for critical reports that should be sent as soon as
         possible.
         Depending on timing and other conditions, sending the report may
         still fail, for example if the HTTP Gateway is closed or the message
         doesn't reach the Gateway.
         If Immediate is set to false, the report is sent based on the health
         client settings from the HTTP Gateway. Therefore, it will be batched
         according to the HealthReportSendInterval configuration.
         This is the recommended setting because it allows the health client
         to optimize health reporting messages to health store as well as
         health report processing.
         By default, reports are not sent immediately.
        :type immediate: bool
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.0"

        # Construct URL
        url = self.report_deployed_service_package_health.metadata['url']
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str'),
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True),
            'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters. The default immediate=False is still sent;
        # only an explicit None would suppress the Immediate parameter.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if immediate is not None:
            query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers (no Accept: the operation returns no body)
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body: the health report payload is mandatory.
        body_content = self._serialize.body(health_information, 'HealthInformation')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'}
self._serialize.body(health_information, 'HealthInformation') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} + + def deploy_service_package_to_node( + self, node_name, deploy_service_package_to_node_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Downloads all of the code packages associated with specified service + manifest on the specified node. + + This API provides a way to download code packages including the + container images on a specific node outside of the normal application + deployment and upgrade path. This is useful for the large code packages + and container images to be present on the node before the actual + application deployment and upgrade, thus significantly reducing the + total time required for the deployment or upgrade. + + :param node_name: The name of the node. + :type node_name: str + :param deploy_service_package_to_node_description: Describes + information for deploying a service package to a Service Fabric node. + :type deploy_service_package_to_node_description: + ~azure.servicefabric.models.DeployServicePackageToNodeDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.deploy_service_package_to_node.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} + + def get_deployed_code_package_info_list( + self, node_name, application_id, service_manifest_name=None, code_package_name=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of code 
packages deployed on a Service Fabric node. + + Gets the list of code packages deployed on a Service Fabric node for + the given application. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest + registered as part of an application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in + service manifest registered as part of an application type in a + Service Fabric cluster. + :type code_package_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_deployed_code_package_info_list.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if service_manifest_name is not None: + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + if code_package_name is not None: + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[DeployedCodePackageInfo]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_deployed_code_package_info_list.metadata = {'url': 
'/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} + + def restart_deployed_code_package( + self, node_name, application_id, restart_deployed_code_package_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Restarts a code package deployed on a Service Fabric node in a cluster. + + Restarts a code package deployed on a Service Fabric node in a cluster. + This aborts the code package process, which will restart all the user + service replicas hosted in that process. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param restart_deployed_code_package_description: Describes the + deployed code package on Service Fabric node to restart. + :type restart_deployed_code_package_description: + ~azure.servicefabric.models.RestartDeployedCodePackageDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.restart_deployed_code_package.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} + + def get_container_logs_deployed_on_node( + self, node_name, application_id, service_manifest_name, code_package_name, tail=None, previous=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the container logs for container deployed on a Service Fabric + node. 
+ + Gets the container logs for container deployed on a Service Fabric node + for the given code package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest + registered as part of an application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in + service manifest registered as part of an application type in a + Service Fabric cluster. + :type code_package_name: str + :param tail: Number of lines to show from the end of the logs. Default + is 100. 'all' to show the complete logs. + :type tail: str + :param previous: Specifies whether to get container logs from + exited/dead containers of the code package instance. + :type previous: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ContainerLogs or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ContainerLogs or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_container_logs_deployed_on_node.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + if tail is not None: + query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') + if previous is not None: + query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ContainerLogs', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + 
get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} + + def invoke_container_api( + self, node_name, application_id, service_manifest_name, code_package_name, code_package_instance_id, container_api_request_body, timeout=60, custom_headers=None, raw=False, **operation_config): + """Invoke container API on a container deployed on a Service Fabric node. + + Invoke container API on a container deployed on a Service Fabric node + for the given code package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest + registered as part of an application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in + service manifest registered as part of an application type in a + Service Fabric cluster. + :type code_package_name: str + :param code_package_instance_id: ID that uniquely identifies a code + package instance deployed on a service fabric node. + :type code_package_instance_id: str + :param container_api_request_body: Parameters for making container API + call + :type container_api_request_body: + ~azure.servicefabric.models.ContainerApiRequestBody + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ContainerApiResponse or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ContainerApiResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.invoke_container_api.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + 
+ if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ContainerApiResponse', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} + + def create_compose_deployment( + self, create_compose_deployment_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Creates a Service Fabric compose deployment. + + Compose is a file format that describes multi-container applications. + This API allows deploying container based applications defined in + compose format in a Service Fabric cluster. Once the deployment is + created, its status can be tracked via the `GetComposeDeploymentStatus` + API. + + :param create_compose_deployment_description: Describes the compose + deployment that needs to be created. + :type create_compose_deployment_description: + ~azure.servicefabric.models.CreateComposeDeploymentDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0-preview" + + # Construct URL + url = self.create_compose_deployment.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} + + def get_compose_deployment_status( + self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets information about a Service Fabric compose deployment. + + Returns the status of the compose deployment that was created or in the + process of being created in the Service Fabric cluster and whose name + matches the one specified as the parameter. The response includes the + name, status, and other details about the deployment. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComposeDeploymentStatusInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0-preview" + + # Construct URL + url = self.get_compose_deployment_status.metadata['url'] + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ComposeDeploymentStatusInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_compose_deployment_status.metadata = 
{'url': '/ComposeDeployments/{deploymentName}'} + + def get_compose_deployment_status_list( + self, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of compose deployments created in the Service Fabric + cluster. + + Gets the status about the compose deployments that were created or in + the process of being created in the Service Fabric cluster. The + response includes the name, status, and other details about the compose + deployments. If the list of deployments do not fit in a page, one page + of results is returned as well as a continuation token, which can be + used to get the next page. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedComposeDeploymentStatusInfoList or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0-preview" + + # Construct URL + url = self.get_compose_deployment_status_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} 
+ + def get_compose_deployment_upgrade_progress( + self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets details for the latest upgrade performed on this Service Fabric + compose deployment. + + Returns the information about the state of the compose deployment + upgrade along with details to aid debugging application health issues. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComposeDeploymentUpgradeProgressInfo or ClientRawResponse if + raw=true + :rtype: + ~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0-preview" + + # Construct URL + url = self.get_compose_deployment_upgrade_progress.metadata['url'] + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and 
send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} + + def remove_compose_deployment( + self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes an existing Service Fabric compose deployment from cluster. + + Deletes an existing Service Fabric compose deployment. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0-preview" + + # Construct URL + url = self.remove_compose_deployment.metadata['url'] + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} + + def start_compose_deployment_upgrade( + self, deployment_name, compose_deployment_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Starts upgrading a compose deployment in the Service Fabric cluster. + + Validates the supplied upgrade parameters and starts upgrading the + deployment if the parameters are valid. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param compose_deployment_upgrade_description: Parameters for + upgrading compose deployment. 
+ :type compose_deployment_upgrade_description: + ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0-preview" + + # Construct URL + url = self.start_compose_deployment_upgrade.metadata['url'] + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} + + def start_rollback_compose_deployment_upgrade( + self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Starts rolling back a compose deployment upgrade in the Service Fabric + cluster. + + Rollback a service fabric compose deployment upgrade. + + :param deployment_name: The identity of the deployment. + :type deployment_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4-preview" + + # Construct URL + url = self.start_rollback_compose_deployment_upgrade.metadata['url'] + path_format_arguments = { + 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_rollback_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/RollbackUpgrade'} + + def get_chaos( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the status of Chaos. + + Get the status of Chaos indicating whether or not Chaos is running, the + Chaos parameters used for running Chaos and the status of the Chaos + Schedule. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: Chaos or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.Chaos or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_chaos.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Chaos', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_chaos.metadata = {'url': '/Tools/Chaos'} + + def start_chaos( + self, chaos_parameters, timeout=60, custom_headers=None, raw=False, **operation_config): + """Starts Chaos in the cluster. + + If Chaos is not already running in the cluster, it starts Chaos with + the passed in Chaos parameters. + If Chaos is already running when this call is made, the call fails with + the error code FABRIC_E_CHAOS_ALREADY_RUNNING. 
+ Refer to the article [Induce controlled Chaos in Service Fabric + clusters](https://docs.microsoft.com/azure/service-fabric/service-fabric-controlled-chaos) + for more details. + + :param chaos_parameters: Describes all the parameters to configure a + Chaos run. + :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_chaos.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(chaos_parameters, 'ChaosParameters') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, 
response) + return client_raw_response + start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'} + + def stop_chaos( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Stops Chaos if it is running in the cluster and put the Chaos Schedule + in a stopped state. + + Stops Chaos from executing new faults. In-flight faults will continue + to execute until they are complete. The current Chaos Schedule is put + into a stopped state. + Once a schedule is stopped, it will stay in the stopped state and not + be used to Chaos Schedule new runs of Chaos. A new Chaos Schedule must + be set in order to resume scheduling. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.stop_chaos.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'} + + def get_chaos_events( + self, continuation_token=None, start_time_utc=None, end_time_utc=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the next segment of the Chaos events based on the continuation + token or the time range. + + To get the next segment of the Chaos events, you can specify the + ContinuationToken. To get the start of a new segment of Chaos events, + you can specify the time range + through StartTimeUtc and EndTimeUtc. You cannot specify both the + ContinuationToken and the time range in the same call. + When there are more than 100 Chaos events, the Chaos events are + returned in multiple segments where a segment contains no more than 100 + Chaos events and to get the next segment you make a call to this API + with the continuation token. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param start_time_utc: The Windows file time representing the start + time of the time range for which a Chaos report is to be generated. + Consult [DateTime.ToFileTimeUtc + Method](https://msdn.microsoft.com/library/system.datetime.tofiletimeutc(v=vs.110).aspx) + for details. + :type start_time_utc: str + :param end_time_utc: The Windows file time representing the end time + of the time range for which a Chaos report is to be generated. Consult + [DateTime.ToFileTimeUtc + Method](https://msdn.microsoft.com/library/system.datetime.tofiletimeutc(v=vs.110).aspx) + for details. + :type end_time_utc: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ChaosEventsSegment or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ChaosEventsSegment or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_chaos_events.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if start_time_utc is not None: + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + if end_time_utc is not None: + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ChaosEventsSegment', response) + + if raw: + client_raw_response = 
ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'} + + def get_chaos_schedule( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the Chaos Schedule defining when and how to run Chaos. + + Gets the version of the Chaos Schedule in use and the Chaos Schedule + that defines when and how to run Chaos. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ChaosScheduleDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ChaosScheduleDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_chaos_schedule.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + 
deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ChaosScheduleDescription', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} + + def post_chaos_schedule( + self, timeout=60, version=None, schedule=None, custom_headers=None, raw=False, **operation_config): + """Set the schedule used by Chaos. + + Chaos will automatically schedule runs based on the Chaos Schedule. + The Chaos Schedule will be updated if the provided version matches the + version on the server. + When updating the Chaos Schedule, the version on the server is + incremented by 1. + The version on the server will wrap back to 0 after reaching a large + number. + If Chaos is running when this call is made, the call will fail. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param version: The version number of the Schedule. + :type version: int + :param schedule: Defines the schedule used by Chaos. + :type schedule: ~azure.servicefabric.models.ChaosSchedule + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + chaos_schedule = models.ChaosScheduleDescription(version=version, schedule=schedule) + + api_version = "6.2" + + # Construct URL + url = self.post_chaos_schedule.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(chaos_schedule, 'ChaosScheduleDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} + + def upload_file( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Uploads contents of the file to the image store. + + Uploads contents of the file to the image store. Use this API if the + file is small enough to upload again if the connection fails. The + file's data needs to be added to the request body. The contents will be + uploaded to the specified path. Image store service uses a mark file to + indicate the availability of the folder. The mark file is an empty file + named "_.dir". 
The mark file is generated by the image store service + when all files in a folder are uploaded. When using File-by-File + approach to upload application package in REST, the image store service + isn't aware of the file hierarchy of the application package; you need + to create a mark file per folder and upload it last, to let the image + store service know that the folder is complete. + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.upload_file.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + upload_file.metadata = {'url': '/ImageStore/{contentPath}'} + + def get_image_store_content( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the image store content information. + + Returns the information about the image store content at the specified + contentPath. The contentPath is relative to the root of the image + store. + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ImageStoreContent or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ImageStoreContent or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_image_store_content.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ImageStoreContent', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} + + def delete_image_store_content( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes existing image store content. 
+ + Deletes existing image store content being found within the given image + store relative path. This command can be used to delete uploaded + application packages once they are provisioned. + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_image_store_content.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return 
client_raw_response + delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} + + def get_image_store_root_content( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the content information at the root of the image store. + + Returns the information about the image store content at the root of + the image store. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ImageStoreContent or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ImageStoreContent or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_image_store_root_content.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = 
self._deserialize('ImageStoreContent', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_root_content.metadata = {'url': '/ImageStore'} + + def copy_image_store_content( + self, image_store_copy_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Copies image store content internally. + + Copies the image store content from the source image store relative + path to the destination image store relative path. + + :param image_store_copy_description: Describes the copy description + for the image store. + :type image_store_copy_description: + ~azure.servicefabric.models.ImageStoreCopyDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.copy_image_store_content.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'} + + def delete_image_store_upload_session( + self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Cancels an image store upload session. + + The DELETE request will cause the existing upload session to expire and + remove any previously uploaded file chunks. + + :param session_id: A GUID generated by the user for a file uploading. + It identifies an image store upload session which keeps track of all + file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_image_store_upload_session.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'} + + def commit_image_store_upload_session( + self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Commit an image store upload session. + + When all file chunks have been uploaded, the upload session needs to be + committed explicitly to complete the upload. Image store preserves the + upload session until the expiration time, which is 30 minutes after the + last chunk received. . 
+ + :param session_id: A GUID generated by the user for a file uploading. + It identifies an image store upload session which keeps track of all + file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.commit_image_store_upload_session.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} + + def get_image_store_upload_session_by_id( + self, session_id, 
timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the image store upload session by ID. + + Gets the image store upload session identified by the given ID. User + can query the upload session at any time during uploading. . + + :param session_id: A GUID generated by the user for a file uploading. + It identifies an image store upload session which keeps track of all + file chunks until it is committed. + :type session_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: UploadSession or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.UploadSession or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_image_store_upload_session_by_id.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if 
response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('UploadSession', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} + + def get_image_store_upload_session_by_path( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the image store upload session by relative path. + + Gets the image store upload session associated with the given image + store relative path. User can query the upload session at any time + during uploading. . + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: UploadSession or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.UploadSession or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_image_store_upload_session_by_path.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('UploadSession', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} + + def upload_file_chunk( + self, content_path, session_id, content_range, timeout=60, custom_headers=None, raw=False, **operation_config): + """Uploads a file chunk to the image store relative path. + + Uploads a file chunk to the image store with the specified upload + session ID and image store relative path. This API allows user to + resume the file upload operation. 
user doesn't have to restart the file + upload from scratch whenever there is a network interruption. Use this + option if the file size is large. + To perform a resumable file upload, user need to break the file into + multiple chunks and upload these chunks to the image store one-by-one. + Chunks don't have to be uploaded in order. If the file represented by + the image store relative path already exists, it will be overwritten + when the upload session commits. + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param session_id: A GUID generated by the user for a file uploading. + It identifies an image store upload session which keeps track of all + file chunks until it is committed. + :type session_id: str + :param content_range: When uploading file chunks to the image store, + the Content-Range header field need to be configured and sent with a + request. The format should looks like "bytes + {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For + example, Content-Range:bytes 300-5000/20000 indicates that user is + sending bytes 300 through 5,000 and the total file length is 20,000 + bytes. + :type content_range: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.upload_file_chunk.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + header_parameters['Content-Range'] = self._serialize.header("content_range", content_range, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} + + def get_image_store_root_folder_size( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the folder size at the root of the image store. + + Returns the total size of files at the root and children folders in + image store. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: FolderSizeInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.FolderSizeInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.5" + + # Construct URL + url = self.get_image_store_root_folder_size.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FolderSizeInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_root_folder_size.metadata = {'url': '/ImageStore/$/FolderSize'} + + def get_image_store_folder_size( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the size of a folder in image store. + + Gets the total size of file under a image store folder, specified by + contentPath. The contentPath is relative to the root of the image + store. 
+ + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: FolderSizeInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.FolderSizeInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.5" + + # Construct URL + url = self.get_image_store_folder_size.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FolderSizeInfo', response) + + if raw: + client_raw_response = 
ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_folder_size.metadata = {'url': '/ImageStore/{contentPath}/$/FolderSize'} + + def get_image_store_info( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the overall ImageStore information. + + Returns information about the primary ImageStore replica, such as disk + capacity and available disk space at the node it is on, and several + categories of the ImageStore's file system usage. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ImageStoreInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ImageStoreInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.5" + + # Construct URL + url = self.get_image_store_info.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ImageStoreInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_image_store_info.metadata = {'url': '/ImageStore/$/Info'} + + def invoke_infrastructure_command( + self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Invokes an administrative command on the given Infrastructure Service + instance. + + For clusters that have one or more instances of the Infrastructure + Service configured, + this API provides a way to send infrastructure-specific commands to a + particular + instance of the Infrastructure Service. + Available commands and their corresponding response formats vary + depending upon + the infrastructure on which the cluster is running. 
+ This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param command: The text of the command to be invoked. The content of + the command is infrastructure-specific. + :type command: str + :param service_id: The identity of the infrastructure service. This is + the full name of the infrastructure service without the 'fabric:' URI + scheme. This parameter required only for the cluster that has more + than one instance of infrastructure service running. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: str or ClientRawResponse if raw=true + :rtype: str or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.invoke_infrastructure_command.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Command'] = self._serialize.query("command", command, 'str') + if service_id is not None: + query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('str', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} + + def invoke_infrastructure_query( + self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Invokes a read-only query on the given infrastructure service instance. + + For clusters that have one or more instances of the Infrastructure + Service configured, + this API provides a way to send infrastructure-specific queries to a + particular + instance of the Infrastructure Service. 
+ Available commands and their corresponding response formats vary + depending upon + the infrastructure on which the cluster is running. + This API supports the Service Fabric platform; it is not meant to be + used directly from your code. + + :param command: The text of the command to be invoked. The content of + the command is infrastructure-specific. + :type command: str + :param service_id: The identity of the infrastructure service. This is + the full name of the infrastructure service without the 'fabric:' URI + scheme. This parameter required only for the cluster that has more + than one instance of infrastructure service running. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: str or ClientRawResponse if raw=true + :rtype: str or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.invoke_infrastructure_query.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['Command'] = self._serialize.query("command", command, 'str') + if service_id is not None: + query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('str', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} + + def start_data_loss( + self, service_id, partition_id, operation_id, data_loss_mode, timeout=60, custom_headers=None, raw=False, **operation_config): + """This API will induce data loss for the specified partition. It will + trigger a call to the OnDataLossAsync API of the partition. + + This API will induce data loss for the specified partition. It will + trigger a call to the OnDataLoss API of the partition. + Actual data loss will depend on the specified DataLossMode. 
+ - PartialDataLoss - Only a quorum of replicas are removed and + OnDataLoss is triggered for the partition but actual data loss depends + on the presence of in-flight replication. + - FullDataLoss - All replicas are removed hence all data is lost and + OnDataLoss is triggered. + This API should only be called with a stateful service as the target. + Calling this API with a system service as the target is not advised. + Note: Once this API has been called, it cannot be reversed. Calling + CancelOperation will only stop execution and clean up internal system + state. + It will not restore data if the command has progressed far enough to + cause data loss. + Call the GetDataLossProgress API with the same OperationId to return + information on the operation started with this API. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param data_loss_mode: This enum is passed to the StartDataLoss API to + indicate what type of data loss to induce. Possible values include: + 'Invalid', 'PartialDataLoss', 'FullDataLoss' + :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_data_loss.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} + + def get_data_loss_progress( + self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): + 
"""Gets the progress of a partition data loss operation started using the + StartDataLoss API. + + Gets the progress of a data loss operation started with StartDataLoss, + using the OperationId. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PartitionDataLossProgress or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PartitionDataLossProgress or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_data_loss_progress.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PartitionDataLossProgress', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} + + def start_quorum_loss( + self, service_id, partition_id, operation_id, quorum_loss_mode, quorum_loss_duration, timeout=60, custom_headers=None, raw=False, **operation_config): + """Induces 
quorum loss for a given stateful service partition. + + This API is useful for a temporary quorum loss situation on your + service. + Call the GetQuorumLossProgress API with the same OperationId to return + information on the operation started with this API. + This can only be called on stateful persisted (HasPersistedState==true) + services. Do not use this API on stateless services or stateful + in-memory only services. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param quorum_loss_mode: This enum is passed to the StartQuorumLoss + API to indicate what type of quorum loss to induce. Possible values + include: 'Invalid', 'QuorumReplicas', 'AllReplicas' + :type quorum_loss_mode: str or + ~azure.servicefabric.models.QuorumLossMode + :param quorum_loss_duration: The amount of time for which the + partition will be kept in quorum loss. This must be specified in + seconds. + :type quorum_loss_duration: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_quorum_loss.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str') + query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} + + def 
get_quorum_loss_progress( + self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the progress of a quorum loss operation on a partition started + using the StartQuorumLoss API. + + Gets the progress of a quorum loss operation started with + StartQuorumLoss, using the provided OperationId. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PartitionQuorumLossProgress or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_quorum_loss_progress.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PartitionQuorumLossProgress', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} + + def start_partition_restart( + self, service_id, partition_id, operation_id, restart_partition_mode, timeout=60, custom_headers=None, raw=False, **operation_config): + """This 
API will restart some or all replicas or instances of the + specified partition. + + This API is useful for testing failover. + If used to target a stateless service partition, RestartPartitionMode + must be AllReplicasOrInstances. + Call the GetPartitionRestartProgress API using the same OperationId to + get the progress. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param restart_partition_mode: Describe which partitions to restart. + Possible values include: 'Invalid', 'AllReplicasOrInstances', + 'OnlyActiveSecondaries' + :type restart_partition_mode: str or + ~azure.servicefabric.models.RestartPartitionMode + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_partition_restart.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} + + def get_partition_restart_progress( + self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the progress of a PartitionRestart operation started using + StartPartitionRestart. + + Gets the progress of a PartitionRestart started with + StartPartitionRestart using the provided OperationId. 
+ + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param partition_id: The identity of the partition. + :type partition_id: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PartitionRestartProgress or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PartitionRestartProgress or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_partition_restart_progress.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PartitionRestartProgress', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} + + def start_node_transition( + self, node_name, operation_id, node_transition_type, node_instance_id, stop_duration_in_seconds, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Starts or stops a cluster node. + + Starts or stops a cluster node. A cluster node is a process, not the + OS instance itself. To start a node, pass in "Start" for the + NodeTransitionType parameter. + To stop a node, pass in "Stop" for the NodeTransitionType parameter. + This API starts the operation - when the API returns the node may not + have finished transitioning yet. + Call GetNodeTransitionProgress with the same OperationId to get the + progress of the operation. + + :param node_name: The name of the node. + :type node_name: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param node_transition_type: Indicates the type of transition to + perform. NodeTransitionType.Start will start a stopped node. + NodeTransitionType.Stop will stop a node that is up. Possible values + include: 'Invalid', 'Start', 'Stop' + :type node_transition_type: str or + ~azure.servicefabric.models.NodeTransitionType + :param node_instance_id: The node instance ID of the target node. + This can be determined through GetNodeInfo API. + :type node_instance_id: str + :param stop_duration_in_seconds: The duration, in seconds, to keep the + node stopped. The minimum value is 600, the maximum is 14400. After + this time expires, the node will automatically come back up. + :type stop_duration_in_seconds: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.start_node_transition.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str') + query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str') + query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} + + def get_node_transition_progress( + self, node_name, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the progress of an operation started using StartNodeTransition. 
+ + Gets the progress of an operation started with StartNodeTransition + using the provided OperationId. + + :param node_name: The name of the node. + :type node_name: str + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: NodeTransitionProgress or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.NodeTransitionProgress or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_node_transition_progress.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('NodeTransitionProgress', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} + + def get_fault_operation_list( + self, type_filter=65535, state_filter=65535, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets a list of user-induced fault operations filtered by provided + input. + + Gets the list of user-induced fault operations filtered by provided + input. + + :param type_filter: Used to filter on OperationType for user-induced + operations. + - 65535 - select all + - 1 - select PartitionDataLoss. + - 2 - select PartitionQuorumLoss. + - 4 - select PartitionRestart. + - 8 - select NodeTransition. + :type type_filter: int + :param state_filter: Used to filter on OperationState's for + user-induced operations. + - 65535 - select All + - 1 - select Running + - 2 - select RollingBack + - 8 - select Completed + - 16 - select Faulted + - 32 - select Cancelled + - 64 - select ForceCancelled + :type state_filter: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.OperationStatus] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_fault_operation_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int') + query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[OperationStatus]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_fault_operation_list.metadata = {'url': '/Faults/'} + + def cancel_operation( + self, operation_id, force=False, timeout=60, custom_headers=None, raw=False, **operation_config): + """Cancels a user-induced fault operation. + + The following APIs start fault operations that may be cancelled by + using CancelOperation: StartDataLoss, StartQuorumLoss, + StartPartitionRestart, StartNodeTransition. + If force is false, then the specified user-induced operation will be + gracefully stopped and cleaned up. 
If force is true, the command will + be aborted, and some internal state + may be left behind. Specifying force as true should be used with care. + Calling this API with force set to true is not allowed until this API + has already + been called on the same test command with force set to false first, or + unless the test command already has an OperationState of + OperationState.RollingBack. + Clarification: OperationState.RollingBack means that the system will + be/is cleaning up internal system state caused by executing the + command. It will not restore data if the + test command was to cause data loss. For example, if you call + StartDataLoss then call this API, the system will only clean up + internal state from running the command. + It will not restore the target partition's data, if the command + progressed far enough to cause data loss. + Important note: if this API is invoked with force==true, internal + state may be left behind. + + :param operation_id: A GUID that identifies a call of this API. This + is passed into the corresponding GetProgress API + :type operation_id: str + :param force: Indicates whether to gracefully roll back and clean up + internal system state modified by executing the user-induced + operation. + :type force: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.cancel_operation.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') + query_parameters['Force'] = self._serialize.query("force", force, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + cancel_operation.metadata = {'url': '/Faults/$/Cancel'} + + def create_backup_policy( + self, backup_policy_description, timeout=60, validate_connection=False, custom_headers=None, raw=False, **operation_config): + """Creates a backup policy. + + Creates a backup policy which can be associated later with a Service + Fabric application, service or a partition for periodic backup. + + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: + ~azure.servicefabric.models.BackupPolicyDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param validate_connection: Specifies whether to validate the storage + connection and credentials before creating or updating the backup + policies. + :type validate_connection: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.create_backup_policy.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if validate_connection is not None: + query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} + + def delete_backup_policy( + self, backup_policy_name, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Deletes the backup policy. + + Deletes an existing backup policy. A backup policy must be created + before it can be deleted. A currently active backup policy, associated + with any Service Fabric application, service or partition, cannot be + deleted without first deleting the mapping. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.delete_backup_policy.metadata['url'] + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} + + def get_backup_policy_list( + self, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets all the backup policies configured. + + Get a list of all the backup policies configured. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedBackupPolicyDescriptionList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_backup_policy_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupPolicyDescriptionList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} + + def get_backup_policy_by_name( + 
self, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets a particular backup policy by name. + + Gets a particular backup policy identified by {backupPolicyName}. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: BackupPolicyDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.BackupPolicyDescription or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_backup_policy_by_name.metadata['url'] + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BackupPolicyDescription', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} + + def get_all_entities_backed_up_by_policy( + self, backup_policy_name, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of backup entities that are associated with this policy. + + Returns a list of Service Fabric application, service or partition + which are associated with this backup policy. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedBackupEntityList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupEntityList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_all_entities_backed_up_by_policy.metadata['url'] + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if 
response.status_code == 200: + deserialized = self._deserialize('PagedBackupEntityList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} + + def update_backup_policy( + self, backup_policy_description, backup_policy_name, timeout=60, validate_connection=False, custom_headers=None, raw=False, **operation_config): + """Updates the backup policy. + + Updates the backup policy identified by {backupPolicyName}. + + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: + ~azure.servicefabric.models.BackupPolicyDescription + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param validate_connection: Specifies whether to validate the storage + connection and credentials before creating or updating the backup + policies. + :type validate_connection: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.update_backup_policy.metadata['url'] + path_format_arguments = { + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if validate_connection is not None: + query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} + + def enable_application_backup( + self, application_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enables periodic backup of stateful partitions under this Service + Fabric application. 
+ + Enables periodic backup of stateful partitions which are part of this + Service Fabric application. Each partition is backed up individually as + per the specified backup policy description. + Note only C# based Reliable Actor and Reliable Stateful services are + currently supported for periodic backup. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param backup_policy_name: Name of the backup policy to be used for + enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) + + api_version = "6.4" + + # Construct URL + url = self.enable_application_backup.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} + + def disable_application_backup( + self, application_id, clean_backup, timeout=60, custom_headers=None, raw=False, **operation_config): + """Disables periodic backup of Service Fabric application. + + Disables periodic backup of Service Fabric application which was + previously enabled. + + :param application_id: The identity of the application. 
This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to + true for deleting all the backups which were created for the backup + entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + disable_backup_description = None + if clean_backup is not None: + disable_backup_description = models.DisableBackupDescription(clean_backup=clean_backup) + + api_version = "6.4" + + # Construct URL + url = self.disable_application_backup.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if disable_backup_description is not None: + body_content = self._serialize.body(disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} + + def get_application_backup_configuration_info( + self, application_id, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the Service Fabric application backup configuration 
information. + + Gets the Service Fabric backup configuration information for the + application and the services and partitions under this application. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedBackupConfigurationInfoList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_application_backup_configuration_info.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupConfigurationInfoList', response) + + if raw: + client_raw_response 
= ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} + + def get_application_backup_list( + self, application_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for every partition in this + application. + + Returns a list of backups available for every partition in this Service + Fabric application. The server enumerates all the backups available at + the backup location configured in the backup policy. It also allows + filtering of the result based on start and end datetime or just + fetching the latest available backup for every partition. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup + available for a partition for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which + to enumerate backups, in datetime format. The date time must be + specified in ISO8601 format. This is an optional parameter. 
If not + specified, all backups from the beginning are enumerated. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specify the end date time till which to + enumerate backups, in datetime format. The date time must be specified + in ISO8601 format. This is an optional parameter. If not specified, + enumeration is done till the most recent backup. + :type end_date_time_filter: datetime + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_application_backup_list.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if 
response.status_code == 200: + deserialized = self._deserialize('PagedBackupInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} + + def suspend_application_backup( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Suspends periodic backup for the specified Service Fabric application. + + The application which is configured to take periodic backups, is + suspended for taking further backups till it is resumed again. This + operation applies to the entire application's hierarchy. It means all + the services and partitions under this application are now suspended + for backup. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.suspend_application_backup.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} + + def resume_application_backup( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes periodic backup of a Service Fabric application which was + previously suspended. + + The previously suspended Service Fabric application resumes taking + periodic backup as per the backup policy currently configured for the + same. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. 
+ For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.resume_application_backup.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_application_backup.metadata = {'url': 
'/Applications/{applicationId}/$/ResumeBackup'} + + def enable_service_backup( + self, service_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enables periodic backup of stateful partitions under this Service + Fabric service. + + Enables periodic backup of stateful partitions which are part of this + Service Fabric service. Each partition is backed up individually as per + the specified backup policy description. In case the application, which + the service is part of, is already enabled for backup then this + operation would override the policy being used to take the periodic + backup for this service and its partitions (unless explicitly + overridden at the partition level). + Note only C# based Reliable Actor and Reliable Stateful services are + currently supported for periodic backup. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param backup_policy_name: Name of the backup policy to be used for + enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) + + api_version = "6.4" + + # Construct URL + url = self.enable_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} + + def disable_service_backup( + self, service_id, clean_backup, timeout=60, custom_headers=None, raw=False, **operation_config): + """Disables periodic backup of Service Fabric service which was previously + enabled. + + Disables periodic backup of Service Fabric service which was previously + enabled. Backup must be explicitly enabled before it can be disabled. 
+ In case the backup is enabled for the Service Fabric application, which + this service is part of, this service would continue to be periodically + backed up as per the policy mapped at the application level. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to + true for deleting all the backups which were created for the backup + entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + disable_backup_description = None + if clean_backup is not None: + disable_backup_description = models.DisableBackupDescription(clean_backup=clean_backup) + + api_version = "6.4" + + # Construct URL + url = self.disable_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if disable_backup_description is not None: + body_content = self._serialize.body(disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} + + def get_service_backup_configuration_info( + self, service_id, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the Service Fabric service backup configuration information. 
+ + Gets the Service Fabric backup configuration information for the + service and the partitions under this service. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedBackupConfigurationInfoList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_service_backup_configuration_info.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupConfigurationInfoList', response) + + if raw: + client_raw_response = 
ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_backup_configuration_info.metadata = {'url': '/Services/{serviceId}/$/GetBackupConfigurationInfo'} + + def get_service_backup_list( + self, service_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for every partition in this service. + + Returns a list of backups available for every partition in this Service + Fabric service. The server enumerates all the backups available in the + backup store configured in the backup policy. It also allows filtering + of the result based on start and end datetime or just fetching the + latest available backup for every partition. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup + available for a partition for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which + to enumerate backups, in datetime format. The date time must be + specified in ISO8601 format. This is an optional parameter. If not + specified, all backups from the beginning are enumerated. 
+ :type start_date_time_filter: datetime + :param end_date_time_filter: Specify the end date time till which to + enumerate backups, in datetime format. The date time must be specified + in ISO8601 format. This is an optional parameter. If not specified, + enumeration is done till the most recent backup. + :type end_date_time_filter: datetime + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_service_backup_list.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if 
response.status_code == 200: + deserialized = self._deserialize('PagedBackupInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'} + + def suspend_service_backup( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Suspends periodic backup for the specified Service Fabric service. + + The service which is configured to take periodic backups, is suspended + for taking further backups till it is resumed again. This operation + applies to the entire service's hierarchy. It means all the partitions + under this service are now suspended for backup. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.suspend_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'} + + def resume_service_backup( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes periodic backup of a Service Fabric service which was + previously suspended. + + The previously suspended Service Fabric service resumes taking periodic + backup as per the backup policy currently configured for the same. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. 
+ For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.resume_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'} + + def 
enable_partition_backup( + self, partition_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enables periodic backup of the stateful persisted partition. + + Enables periodic backup of stateful persisted partition. Each partition + is backed up as per the specified backup policy description. In case + the application or service, which is partition is part of, is already + enabled for backup then this operation would override the policy being + used to take the periodic backup of this partition. + Note only C# based Reliable Actor and Reliable Stateful services are + currently supported for periodic backup. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_policy_name: Name of the backup policy to be used for + enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) + + api_version = "6.4" + + # Construct URL + url = self.enable_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'} + + def disable_partition_backup( + self, partition_id, clean_backup, timeout=60, custom_headers=None, raw=False, **operation_config): + """Disables periodic backup of Service Fabric partition which was + previously enabled. + + Disables periodic backup of partition which was previously enabled. + Backup must be explicitly enabled before it can be disabled. 
+ In case the backup is enabled for the Service Fabric application or + service, which this partition is part of, this partition would continue + to be periodically backed up as per the policy mapped at the higher + level entity. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param clean_backup: Boolean flag to delete backups. It can be set to + true for deleting all the backups which were created for the backup + entity that is getting disabled for backup. + :type clean_backup: bool + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + disable_backup_description = None + if clean_backup is not None: + disable_backup_description = models.DisableBackupDescription(clean_backup=clean_backup) + + api_version = "6.4" + + # Construct URL + url = self.disable_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if disable_backup_description is not None: + body_content = self._serialize.body(disable_backup_description, 'DisableBackupDescription') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/DisableBackup'} + + def get_partition_backup_configuration_info( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the partition backup configuration information. 
+ + Gets the Service Fabric Backup configuration information for the + specified partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PartitionBackupConfigurationInfo or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_backup_configuration_info.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 
200: + deserialized = self._deserialize('PartitionBackupConfigurationInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'} + + def get_partition_backup_list( + self, partition_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for the specified partition. + + Returns a list of backups available for the specified partition. The + server enumerates all the backups available in the backup store + configured in the backup policy. It also allows filtering of the result + based on start and end datetime or just fetching the latest available + backup for the partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup + available for a partition for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which + to enumerate backups, in datetime format. The date time must be + specified in ISO8601 format. This is an optional parameter. If not + specified, all backups from the beginning are enumerated. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specify the end date time till which to + enumerate backups, in datetime format. The date time must be specified + in ISO8601 format. This is an optional parameter. If not specified, + enumeration is done till the most recent backup. 
+ :type end_date_time_filter: datetime + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_backup_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized 
= self._deserialize('PagedBackupInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'} + + def suspend_partition_backup( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Suspends periodic backup for the specified partition. + + The partition which is configured to take periodic backups, is + suspended for taking further backups till it is resumed again. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.suspend_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'} + + def resume_partition_backup( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes periodic backup of partition which was previously suspended. + + The previously suspended partition resumes taking periodic backup as + per the backup policy currently configured for the same. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.resume_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'} + + def backup_partition( + self, partition_id, backup_timeout=10, timeout=60, backup_storage=None, custom_headers=None, raw=False, **operation_config): + """Triggers backup of the partition's state. + + Creates a backup of the stateful persisted partition's state. In case + the partition is already being periodically backed up, then by default + the new backup is created at the same backup storage. 
One can also + override the same by specifying the backup storage details as part of + the request body. Once the backup is initiated, its progress can be + tracked using the GetBackupProgress operation. + In case, the operation times out, specify a greater backup timeout + value in the query parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_timeout: Specifies the maximum amount of time, in + minutes, to wait for the backup operation to complete. Post that, the + operation completes with timeout error. However, in certain corner + cases it could be that though the operation returns back timeout, the + backup actually goes through. In case of timeout error, its + recommended to invoke this operation again with a greater timeout + value. The default value for the same is 10 minutes. + :type backup_timeout: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param backup_storage: Specifies the details of the backup storage + where to save the backup. + :type backup_storage: + ~azure.servicefabric.models.BackupStorageDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + backup_partition_description = None + if backup_storage is not None: + backup_partition_description = models.BackupPartitionDescription(backup_storage=backup_storage) + + api_version = "6.4" + + # Construct URL + url = self.backup_partition.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if backup_timeout is not None: + query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if backup_partition_description is not None: + body_content = self._serialize.body(backup_partition_description, 'BackupPartitionDescription') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'} + + def get_partition_backup_progress( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + 
"""Gets details for the latest backup triggered for this partition. + + Returns information about the state of the latest backup along with + details or failure reason in case of completion. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: BackupProgressInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.BackupProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_backup_progress.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, 
response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BackupProgressInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_backup_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupProgress'} + + def restore_partition( + self, partition_id, restore_partition_description, restore_timeout=10, timeout=60, custom_headers=None, raw=False, **operation_config): + """Triggers restore of the state of the partition using the specified + restore partition description. + + Restores the state of a of the stateful persisted partition using the + specified backup point. In case the partition is already being + periodically backed up, then by default the backup point is looked for + in the storage specified in backup policy. One can also override the + same by specifying the backup storage details as part of the restore + partition description in body. Once the restore is initiated, its + progress can be tracked using the GetRestoreProgress operation. + In case, the operation times out, specify a greater restore timeout + value in the query parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param restore_partition_description: Describes the parameters to + restore the partition. + :type restore_partition_description: + ~azure.servicefabric.models.RestorePartitionDescription + :param restore_timeout: Specifies the maximum amount of time to wait, + in minutes, for the restore operation to complete. Post that, the + operation returns back with timeout error. However, in certain corner + cases it could be that the restore operation goes through even though + it completes with timeout. In case of timeout error, its recommended + to invoke this operation again with a greater timeout value. the + default value for the same is 10 minutes. 
+ :type restore_timeout: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.restore_partition.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if restore_timeout is not None: + query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response) + return client_raw_response + restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'} + + def get_partition_restore_progress( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets details for the latest restore operation triggered for this + partition. + + Returns information about the state of the latest restore operation + along with details or failure reason in case of completion. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: RestoreProgressInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RestoreProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_restore_progress.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('RestoreProgressInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_restore_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetRestoreProgress'} + + def get_backups_from_backup_location( + self, get_backup_by_storage_query_description, timeout=60, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for the specified backed up entity + at the specified backup location. 
+ + Gets the list of backups available for the specified backed up entity + (Application, Service or Partition) at the specified backup location + (FileShare or Azure Blob Storage). + + :param get_backup_by_storage_query_description: Describes the filters + and backup storage details to be used for enumerating backups. + :type get_backup_by_storage_query_description: + ~azure.servicefabric.models.GetBackupByStorageQueryDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged query includes as + many results as possible that fit in the return message. + :type max_results: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_backups_from_backup_location.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'} + + def create_name( + self, name, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Creates a Service Fabric name. + + Creates the specified Service Fabric name. + + :param name: The Service Fabric name, including the 'fabric:' URI + scheme. + :type name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + name_description = models.NameDescription(name=name) + + api_version = "6.0" + + # Construct URL + url = self.create_name.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(name_description, 'NameDescription') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + create_name.metadata = {'url': 
'/Names/$/Create'} + + def get_name_exists_info( + self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Returns whether the Service Fabric name exists. + + Returns whether the specified Service Fabric name exists. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_name_exists_info.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = 
ClientRawResponse(None, response) + return client_raw_response + get_name_exists_info.metadata = {'url': '/Names/{nameId}'} + + def delete_name( + self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes a Service Fabric name. + + Deletes the specified Service Fabric name. A name must be created + before it can be deleted. Deleting a name with child properties will + fail. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_name.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_name.metadata = {'url': '/Names/{nameId}'} + + def get_sub_name_info_list( + self, name_id, recursive=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enumerates all the Service Fabric names under a given name. + + Enumerates all the Service Fabric names under a given name. If the + subnames do not fit in a page, one page of results is returned as well + as a continuation token, which can be used to get the next page. + Querying a name that doesn't exist will fail. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param recursive: Allows specifying that the search performed should + be recursive. 
+ :type recursive: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedSubNameInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedSubNameInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_sub_name_info_list.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if recursive is not None: + query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedSubNameInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'} + + def get_property_info_list( + self, name_id, include_values=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets 
information on all Service Fabric properties under a given name. + + A Service Fabric name can have one or more named properties that store + custom information. This operation gets the information about these + properties in a paged list. The information includes name, value, and + metadata about each of the properties. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param include_values: Allows specifying whether to include the values + of the properties returned. True if values should be returned with the + metadata; False to return only property metadata. + :type include_values: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non-empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results, then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedPropertyInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedPropertyInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_property_info_list.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if include_values is not None: + query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PagedPropertyInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'} + + def put_property( + self, name_id, property_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Creates or 
updates a Service Fabric property. + + Creates or updates the specified Service Fabric property under a given + name. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param property_description: Describes the Service Fabric property to + be created. + :type property_description: + ~azure.servicefabric.models.PropertyDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.put_property.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(property_description, 'PropertyDescription') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = 
self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} + + def get_property_info( + self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the specified Service Fabric property. + + Gets the specified Service Fabric property under a given name. This + will always return both value and metadata. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param property_name: Specifies the name of the property to get. + :type property_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PropertyInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PropertyInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_property_info.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PropertyInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'} + + def delete_property( + self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes the specified Service Fabric property. + + Deletes the specified Service Fabric property under a given name. A + property must be created before it can be deleted. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. 
+ :type name_id: str + :param property_name: Specifies the name of the property to delete. + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_property.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} + + def
submit_property_batch( + self, name_id, timeout=60, operations=None, custom_headers=None, raw=False, **operation_config): + """Submits a property batch. + + Submits a batch of property operations. Either all or none of the + operations will be committed. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param operations: A list of the property batch operations to be + executed. + :type operations: + list[~azure.servicefabric.models.PropertyBatchOperation] + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PropertyBatchInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PropertyBatchInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + property_batch_description_list = models.PropertyBatchDescriptionList(operations=operations) + + api_version = "6.0" + + # Construct URL + url = self.submit_property_batch.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(property_batch_description_list, 'PropertyBatchDescriptionList') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 409]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SuccessfulPropertyBatchInfo', response) + if response.status_code == 409: + deserialized = self._deserialize('FailedPropertyBatchInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'} + + def 
get_cluster_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Cluster-related events. + + The response is list of ClusterEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ClusterEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_cluster_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ClusterEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_cluster_event_list.metadata = 
{'url': '/EventsStore/Cluster/Events'} + + def get_containers_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Containers-related events. + + The response is list of ContainerInstanceEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_containers_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ContainerInstanceEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + 
get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'} + + def get_node_event_list( + self, node_name, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets Node-related events. + + The response is list of NodeEvent objects. + + :param node_name: The name of the node. + :type node_name: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`.
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.NodeEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_node_event_list.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[NodeEvent]', response) + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'} + + def get_nodes_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Nodes-related Events. + + The response is list of NodeEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.NodeEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_nodes_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[NodeEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_nodes_event_list.metadata = {'url': 
'/EventsStore/Nodes/Events'} + + def get_application_event_list( + self, application_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets an Application-related events. + + The response is list of ApplicationEvent objects. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ApplicationEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_application_event_list.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + 
response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ApplicationEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'} + + def get_applications_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Applications-related events. + + The response is list of ApplicationEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. 
+ :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ApplicationEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_applications_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized 
= None + if response.status_code == 200: + deserialized = self._deserialize('[ApplicationEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'} + + def get_service_event_list( + self, service_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets a Service-related events. + + The response is list of ServiceEvent objects. + + :param service_id: The identity of the service. This ID is typically + the full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ServiceEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_service_event_list.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct 
headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ServiceEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'} + + def get_services_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Services-related events. + + The response is list of ServiceEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ServiceEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_services_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + 
request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ServiceEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} + + def get_partition_event_list( + self, partition_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets a Partition-related events. + + The response is list of PartitionEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. 
otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.PartitionEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_event_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + 
# Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[PartitionEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} + + def get_partitions_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Partitions-related events. + + The response is list of PartitionEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. 
otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.PartitionEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partitions_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if 
response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[PartitionEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} + + def get_partition_replica_event_list( + self, partition_id, replica_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets a Partition Replica-related events. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. 
otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ReplicaEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_replica_event_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + 
header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ReplicaEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} + + def get_partition_replicas_event_list( + self, partition_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Replicas-related events for a Partition. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. 
+ :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ReplicaEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_partition_replicas_event_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not 
None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[ReplicaEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} + + def get_correlated_event_list( + self, event_instance_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets all correlated events for a given event. + + The response is list of FabricEvents. + + :param event_instance_id: The EventInstanceId. + :type event_instance_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.FabricEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.4" + + # Construct URL + url = self.get_correlated_event_list.metadata['url'] + path_format_arguments = { + 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[FabricEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_correlated_event_list.metadata = {'url': '/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py deleted file mode 100644 index fec8f7cc0449..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/operations/_service_fabric_client_apis_operations.py +++ /dev/null @@ 
-1,16866 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceFabricClientAPIsOperationsMixin(object): - - def get_cluster_manifest( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterManifest" - """Get the Service Fabric cluster manifest. - - Get the Service Fabric cluster manifest. The cluster manifest contains properties of the - cluster that include different node types on the cluster, - security configurations, fault, and upgrade domain topologies, etc. - - These properties are specified as part of the ClusterConfig.JSON file while deploying a - stand-alone cluster. However, most of the information in the cluster manifest - is generated internally by service fabric during cluster deployment in other deployment - scenarios (e.g. when using Azure portal). 
- - The contents of the cluster manifest are for informational purposes only and users are not - expected to take a dependency on the format of the file contents or its interpretation. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterManifest, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterManifest - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterManifest"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_manifest.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, 
model=error) - - deserialized = self._deserialize('ClusterManifest', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'} # type: ignore - - def get_cluster_health( - self, - nodes_health_state_filter=0, # type: Optional[int] - applications_health_state_filter=0, # type: Optional[int] - events_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - include_system_application_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterHealth" - """Gets the health of a Service Fabric cluster. - - Use EventsHealthStateFilter to filter the collection of health events reported on the cluster - based on the health state. - Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the - collection of nodes and applications returned based on their aggregated health state. - - :param nodes_health_state_filter: Allows filtering of the node health state objects returned in - the result of cluster health query - based on their health state. The possible values for this parameter include integer value of - one of the - following health states. Only nodes that match the filter are returned. All nodes are used to - evaluate the aggregated health state. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of nodes with HealthState value of - OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. 
- * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type nodes_health_state_filter: int - :param applications_health_state_filter: Allows filtering of the application health state - objects returned in the result of cluster health - query based on their health state. - The possible values for this parameter include integer value obtained from members or bitwise - operations - on members of HealthStateFilter enumeration. Only applications that match the filter are - returned. - All applications are used to evaluate the aggregated health state. If not specified, all - entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of applications with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type applications_health_state_filter: int - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. 
- Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param include_system_application_health_statistics: Indicates whether the health statistics - should include the fabric:/System application health statistics. False by default. - If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the - entities that belong to the fabric:/System application. - Otherwise, the query result includes health statistics only for user applications. - The health statistics must be included in the query result for this parameter to be applied. - :type include_system_application_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_health.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if nodes_health_state_filter is not None: - query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') - if applications_health_state_filter is not None: - query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if include_system_application_health_statistics is not None: - query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') - if timeout is not None: 
- query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health.metadata = {'url': '/$/GetClusterHealth'} # type: ignore - - def get_cluster_health_using_policy( - self, - nodes_health_state_filter=0, # type: Optional[int] - applications_health_state_filter=0, # type: Optional[int] - events_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - include_system_application_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - application_health_policy_map=None, # type: Optional[List["_models.ApplicationHealthPolicyMapItem"]] - cluster_health_policy=None, # type: Optional["_models.ClusterHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterHealth" - """Gets the health of a Service Fabric cluster using the specified policy. - - Use EventsHealthStateFilter to filter the collection of health events reported on the cluster - based on the health state. - Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the - collection of nodes and applications returned based on their aggregated health state. 
- Use ClusterHealthPolicies to override the health policies used to evaluate the health. - - :param nodes_health_state_filter: Allows filtering of the node health state objects returned in - the result of cluster health query - based on their health state. The possible values for this parameter include integer value of - one of the - following health states. Only nodes that match the filter are returned. All nodes are used to - evaluate the aggregated health state. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of nodes with HealthState value of - OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type nodes_health_state_filter: int - :param applications_health_state_filter: Allows filtering of the application health state - objects returned in the result of cluster health - query based on their health state. - The possible values for this parameter include integer value obtained from members or bitwise - operations - on members of HealthStateFilter enumeration. Only applications that match the filter are - returned. - All applications are used to evaluate the aggregated health state. If not specified, all - entries are returned. 
- The state values are flag-based enumeration, so the value could be a combination of these - values obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of applications with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type applications_health_state_filter: int - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. 
- * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param include_system_application_health_statistics: Indicates whether the health statistics - should include the fabric:/System application health statistics. False by default. - If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the - entities that belong to the fabric:/System application. - Otherwise, the query result includes health statistics only for user applications. - The health statistics must be included in the query result for this parameter to be applied. - :type include_system_application_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy_map: Defines a map that contains specific application health - policies for different applications. - Each entry specifies as key the application name and as value an ApplicationHealthPolicy used - to evaluate the application health. - If an application is not specified in the map, the application health evaluation uses the - ApplicationHealthPolicy found in its application manifest or the default application health - policy (if no health policy is defined in the manifest). - The map is empty by default. 
- :type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] - :param cluster_health_policy: Defines a health policy used to evaluate the health of the - cluster or of a cluster node. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cluster_health_policies = _models.ClusterHealthPolicies(application_health_policy_map=application_health_policy_map, cluster_health_policy=cluster_health_policy) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_cluster_health_using_policy.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if nodes_health_state_filter is not None: - query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') - if applications_health_state_filter is not None: - query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = 
self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if include_system_application_health_statistics is not None: - query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _cluster_health_policies is not None: - body_content = self._serialize.body(_cluster_health_policies, 'ClusterHealthPolicies') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'} # type: ignore - - def get_cluster_health_chunk( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterHealthChunk" - """Gets the health of a Service Fabric cluster using health chunks. 
- - Gets the health of a Service Fabric cluster using health chunks. Includes the aggregated health - state of the cluster, but none of the cluster entities. - To expand the cluster health and get the health state of all or some of the entities, use the - POST URI and specify the cluster health chunk query description. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealthChunk, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealthChunk - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_health_chunk.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore - - def get_cluster_health_chunk_using_policy_and_advanced_filters( - self, - timeout=60, # type: Optional[int] - cluster_health_chunk_query_description=None, # type: Optional["_models.ClusterHealthChunkQueryDescription"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterHealthChunk" - """Gets the health of a Service Fabric cluster using health chunks. - - Gets the health of a Service Fabric cluster using health chunks. The health evaluation is done - based on the input cluster health chunk query description. - The query description allows users to specify health policies for evaluating the cluster and - its children. - Users can specify very flexible filters to select which cluster entities to return. The - selection can be done based on the entities health state and based on the hierarchy. - The query can return multi-level children of the entities based on the specified filters. For - example, it can return one application with a specified name, and for this application, return - only services that are in Error or Warning, and all partitions and replicas for one of these - services. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param cluster_health_chunk_query_description: Describes the cluster and application health - policies used to evaluate the cluster health and the filters to select which cluster entities - to be returned. - If the cluster health policy is present, it is used to evaluate the cluster events and the - cluster nodes. If not present, the health evaluation uses the cluster health policy defined in - the cluster manifest or the default cluster health policy. - By default, each application is evaluated using its specific application health policy, - defined in the application manifest, or the default health policy, if no policy is defined in - manifest. - If the application health policy map is specified, and it has an entry for an application, the - specified application health policy - is used to evaluate the application health. - Users can specify very flexible filters to select which cluster entities to include in - response. The selection can be done based on the entities health state and based on the - hierarchy. - The query can return multi-level children of the entities based on the specified filters. For - example, it can return one application with a specified name, and for this application, return - only services that are in Error or Warning, and all partitions and replicas for one of these - services. 
- :type cluster_health_chunk_query_description: ~azure.servicefabric.models.ClusterHealthChunkQueryDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterHealthChunk, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterHealthChunk - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterHealthChunk"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if cluster_health_chunk_query_description is not None: - body_content = self._serialize.body(cluster_health_chunk_query_description, 'ClusterHealthChunkQueryDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterHealthChunk', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} # type: ignore - - def report_cluster_health( - self, - health_information, # type: "_models.HealthInformation" - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sends a health report on the Service Fabric cluster. - - Sends a health report on a Service Fabric cluster. The report must contain the information - about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway node, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetClusterHealth and check that - the report appears in the HealthEvents section. - - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. 
- If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_cluster_health.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - 
return cls(pipeline_response, None, {}) - - report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} # type: ignore - - def get_provisioned_fabric_code_version_info_list( - self, - code_version=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.FabricCodeVersionInfo"] - """Gets a list of fabric code versions that are provisioned in a Service Fabric cluster. - - Gets a list of information about fabric code versions that are provisioned in the cluster. The - parameter CodeVersion can be used to optionally filter the output to only that particular - version. - - :param code_version: The product version of Service Fabric. - :type code_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of FabricCodeVersionInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.FabricCodeVersionInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricCodeVersionInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if code_version is not None: - query_parameters['CodeVersion'] = self._serialize.query("code_version", code_version, 'str') - if 
timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[FabricCodeVersionInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} # type: ignore - - def get_provisioned_fabric_config_version_info_list( - self, - config_version=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.FabricConfigVersionInfo"] - """Gets a list of fabric config versions that are provisioned in a Service Fabric cluster. - - Gets a list of information about fabric config versions that are provisioned in the cluster. - The parameter ConfigVersion can be used to optionally filter the output to only that particular - version. - - :param config_version: The config version of Service Fabric. - :type config_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of FabricConfigVersionInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.FabricConfigVersionInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricConfigVersionInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if config_version is not None: - query_parameters['ConfigVersion'] = self._serialize.query("config_version", config_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[FabricConfigVersionInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} # type: ignore - - def get_cluster_upgrade_progress( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterUpgradeProgressObject" - """Gets the progress of the current cluster upgrade. - - Gets the current progress of the ongoing cluster upgrade. If no upgrade is currently in - progress, get the last state of the previous cluster upgrade. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterUpgradeProgressObject, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterUpgradeProgressObject - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterUpgradeProgressObject"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_upgrade_progress.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterUpgradeProgressObject', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_upgrade_progress.metadata = {'url': '/$/GetUpgradeProgress'} # type: ignore - - def get_cluster_configuration( - self, - configuration_api_version, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterConfiguration" - """Get the Service Fabric standalone cluster configuration. - - The cluster configuration contains properties of the cluster that include different node types - on the cluster, - security configurations, fault, and upgrade domain topologies, etc. - - :param configuration_api_version: The API version of the Standalone cluster json configuration. - :type configuration_api_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterConfiguration, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterConfiguration - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfiguration"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_configuration.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ConfigurationApiVersion'] = self._serialize.query("configuration_api_version", configuration_api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterConfiguration', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'} # type: ignore - - def 
get_cluster_configuration_upgrade_status( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterConfigurationUpgradeStatusInfo" - """Get the cluster configuration upgrade status of a Service Fabric standalone cluster. - - Get the cluster configuration upgrade status details of a Service Fabric standalone cluster. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterConfigurationUpgradeStatusInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterConfigurationUpgradeStatusInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfigurationUpgradeStatusInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_configuration_upgrade_status.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterConfigurationUpgradeStatusInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'} # type: ignore - - def get_upgrade_orchestration_service_state( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.UpgradeOrchestrationServiceState" - """Get the service state of Service Fabric Upgrade Orchestration Service. - - Get the service state of Service Fabric Upgrade Orchestration Service. This API is internally - used for support purposes. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UpgradeOrchestrationServiceState, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceState - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UpgradeOrchestrationServiceState"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_upgrade_orchestration_service_state.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UpgradeOrchestrationServiceState', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'} # type: ignore - - def set_upgrade_orchestration_service_state( - self, - 
timeout=60, # type: Optional[int] - service_state=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.UpgradeOrchestrationServiceStateSummary" - """Update the service state of Service Fabric Upgrade Orchestration Service. - - Update the service state of Service Fabric Upgrade Orchestration Service. This API is - internally used for support purposes. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param service_state: The state of Service Fabric Upgrade Orchestration Service. - :type service_state: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UpgradeOrchestrationServiceStateSummary, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UpgradeOrchestrationServiceStateSummary - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UpgradeOrchestrationServiceStateSummary"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _upgrade_orchestration_service_state = _models.UpgradeOrchestrationServiceState(service_state=service_state) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.set_upgrade_orchestration_service_state.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - 
header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_upgrade_orchestration_service_state, 'UpgradeOrchestrationServiceState') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UpgradeOrchestrationServiceStateSummary', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'} # type: ignore - - def provision_cluster( - self, - timeout=60, # type: Optional[int] - code_file_path=None, # type: Optional[str] - cluster_manifest_file_path=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Provision the code or configuration packages of a Service Fabric cluster. - - Validate and provision the code or configuration packages of a Service Fabric cluster. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param code_file_path: The cluster code package file path. 
- :type code_file_path: str - :param cluster_manifest_file_path: The cluster manifest file path. - :type cluster_manifest_file_path: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _provision_fabric_description = _models.ProvisionFabricDescription(code_file_path=code_file_path, cluster_manifest_file_path=cluster_manifest_file_path) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.provision_cluster.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_provision_fabric_description, 'ProvisionFabricDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - provision_cluster.metadata = {'url': '/$/Provision'} # type: ignore - - def unprovision_cluster( - self, - timeout=60, # type: Optional[int] - code_version=None, # type: Optional[str] - config_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Unprovision the code or configuration packages of a Service Fabric cluster. - - It is supported to unprovision code and configuration separately. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param code_version: The cluster code package version. - :type code_version: str - :param config_version: The cluster manifest version. 
- :type config_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _unprovision_fabric_description = _models.UnprovisionFabricDescription(code_version=code_version, config_version=config_version) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.unprovision_cluster.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_unprovision_fabric_description, 'UnprovisionFabricDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - unprovision_cluster.metadata = {'url': '/$/Unprovision'} # type: ignore - - def rollback_cluster_upgrade( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Roll back the upgrade of a Service Fabric cluster. - - Roll back the code or configuration upgrade of a Service Fabric cluster. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.rollback_cluster_upgrade.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'} # type: ignore - - def resume_cluster_upgrade( - self, - upgrade_domain, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Make the cluster upgrade move on to the next upgrade domain. - - Make the cluster code or configuration upgrade move on to the next upgrade domain if - appropriate. - - :param upgrade_domain: The next upgrade domain for this cluster upgrade. - :type upgrade_domain: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _resume_cluster_upgrade_description = _models.ResumeClusterUpgradeDescription(upgrade_domain=upgrade_domain) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.resume_cluster_upgrade.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_resume_cluster_upgrade_description, 'ResumeClusterUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, 
model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'} # type: ignore - - def start_cluster_upgrade( - self, - start_cluster_upgrade_description, # type: "_models.StartClusterUpgradeDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Start upgrading the code or configuration version of a Service Fabric cluster. - - Validate the supplied upgrade parameters and start upgrading the code or configuration version - of a Service Fabric cluster if the parameters are valid. - - :param start_cluster_upgrade_description: Describes the parameters for starting a cluster - upgrade. - :type start_cluster_upgrade_description: ~azure.servicefabric.models.StartClusterUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_cluster_upgrade.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(start_cluster_upgrade_description, 'StartClusterUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_cluster_upgrade.metadata = {'url': 
'/$/Upgrade'} # type: ignore - - def start_cluster_configuration_upgrade( - self, - cluster_configuration_upgrade_description, # type: "_models.ClusterConfigurationUpgradeDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Start upgrading the configuration of a Service Fabric standalone cluster. - - Validate the supplied configuration upgrade parameters and start upgrading the cluster - configuration if the parameters are valid. - - :param cluster_configuration_upgrade_description: Parameters for a standalone cluster - configuration upgrade. - :type cluster_configuration_upgrade_description: ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_cluster_configuration_upgrade.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: 
Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(cluster_configuration_upgrade_description, 'ClusterConfigurationUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'} # type: ignore - - def update_cluster_upgrade( - self, - update_cluster_upgrade_description, # type: "_models.UpdateClusterUpgradeDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Update the upgrade parameters of a Service Fabric cluster upgrade. - - Update the upgrade parameters used during a Service Fabric cluster upgrade. - - :param update_cluster_upgrade_description: Parameters for updating a cluster upgrade. - :type update_cluster_upgrade_description: ~azure.servicefabric.models.UpdateClusterUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_cluster_upgrade.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(update_cluster_upgrade_description, 'UpdateClusterUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_cluster_upgrade.metadata = {'url': 
'/$/UpdateUpgrade'} # type: ignore - - def get_aad_metadata( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.AadMetadataObject" - """Gets the Azure Active Directory metadata used for secured connection to cluster. - - Gets the Azure Active Directory metadata used for secured connection to cluster. - This API is not supposed to be called separately. It provides information needed to set up an - Azure Active Directory secured connection with a Service Fabric cluster. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: AadMetadataObject, or the result of cls(response) - :rtype: ~azure.servicefabric.models.AadMetadataObject - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.AadMetadataObject"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_aad_metadata.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('AadMetadataObject', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'} # type: ignore - - def get_cluster_version( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ClusterVersion" - """Get the current Service Fabric cluster version. - - If a cluster upgrade is happening, then this API will return the lowest (older) version of the - current and target cluster runtime versions. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterVersion, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterVersion - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterVersion"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_version.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterVersion', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_version.metadata = {'url': '/$/GetClusterVersion'} # type: ignore - - def get_cluster_load( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ClusterLoadInfo" - """Gets the load of a Service Fabric cluster. - - Retrieves the load information of a Service Fabric cluster for all the metrics that have load - or capacity defined. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ClusterLoadInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ClusterLoadInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterLoadInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_load.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ClusterLoadInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_load.metadata = {'url': '/$/GetLoadInformation'} # type: ignore - - def toggle_verbose_service_placement_health_reporting( - self, - enabled, # type: bool - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Changes the verbosity of service placement health reporting. - - If verbosity is set to true, then detailed health reports will be generated when replicas - cannot be placed or dropped. - If verbosity is set to false, then no health reports will be generated when replicas cannot be - placed or dropped. - - :param enabled: The verbosity of service placement health reporting. - :type enabled: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.toggle_verbose_service_placement_health_reporting.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Enabled'] = self._serialize.query("enabled", enabled, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - toggle_verbose_service_placement_health_reporting.metadata = {'url': '/$/ToggleVerboseServicePlacementHealthReporting'} # type: ignore - - def get_node_info_list( - self, - continuation_token_parameter=None, # type: Optional[str] - node_status_filter="default", # type: Optional[Union[str, "_models.NodeStatusFilter"]] - 
max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedNodeInfoList" - """Gets the list of nodes in the Service Fabric cluster. - - The response includes the name, status, ID, health, uptime, and other details about the nodes. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param node_status_filter: Allows filtering the nodes based on the NodeStatus. Only the nodes - that are matching the specified filter value will be returned. The filter value can be one of - the following. - :type node_status_filter: str or ~azure.servicefabric.models.NodeStatusFilter - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedNodeInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedNodeInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedNodeInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if node_status_filter is not None: - query_parameters['NodeStatusFilter'] = self._serialize.query("node_status_filter", node_status_filter, 'str') - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedNodeInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_info_list.metadata = {'url': '/Nodes'} # type: ignore - - def get_node_info( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.NodeInfo"] - """Gets the information about a specific node in the Service Fabric cluster. - - The response includes the name, status, ID, health, uptime, and other details about the node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NodeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('NodeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_info.metadata = {'url': '/Nodes/{nodeName}'} # type: ignore - - def get_node_health( - self, - node_name, # type: str - events_health_state_filter=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.NodeHealth" - """Gets the health of a Service Fabric node. - - Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection - of health events reported on the node based on the health state. If the node that you specify - by name does not exist in the health store, this returns an error. - - :param node_name: The name of the node. - :type node_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. 
The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeHealth', pipeline_response) - - if cls: - return 
cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} # type: ignore - - def get_node_health_using_policy( - self, - node_name, # type: str - events_health_state_filter=0, # type: Optional[int] - timeout=60, # type: Optional[int] - cluster_health_policy=None, # type: Optional["_models.ClusterHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.NodeHealth" - """Gets the health of a Service Fabric node, by using the specified health policy. - - Gets the health of a Service Fabric node. Use EventsHealthStateFilter to filter the collection - of health events reported on the node based on the health state. Use ClusterHealthPolicy in the - POST body to override the health policies used to evaluate the health. If the node that you - specify by name does not exist in the health store, this returns an error. - - :param node_name: The name of the node. - :type node_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. 
- * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param cluster_health_policy: Describes the health policies used to evaluate the health of a - cluster or node. If not present, the health evaluation uses the health policy from cluster - manifest or the default health policy. - :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_node_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = 
self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if cluster_health_policy is not None: - body_content = self._serialize.body(cluster_health_policy, 'ClusterHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} # type: ignore - - def report_node_health( - self, - node_name, # type: str - health_information, # type: "_models.HealthInformation" - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sends a health report on the Service Fabric node. - - Reports health state of the specified Service Fabric node. The report must contain the - information about the source of the health report and property on which it is reported. 
- The report is sent to a Service Fabric gateway node, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetNodeHealth and check that the - report appears in the HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_node_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'} # type: ignore - - def get_node_load_info( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.NodeLoadInfo" - """Gets the load information of a Service Fabric node. - - Retrieves the load information of a Service Fabric node for all the metrics that have load or - capacity defined. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeLoadInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeLoadInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeLoadInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_load_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeLoadInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} # type: ignore - - def 
disable_node( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - deactivation_intent=None, # type: Optional[Union[str, "_models.DeactivationIntent"]] - **kwargs # type: Any - ): - # type: (...) -> None - """Deactivate a Service Fabric cluster node with the specified deactivation intent. - - Deactivate a Service Fabric cluster node with the specified deactivation intent. Once the - deactivation is in progress, the deactivation intent can be increased, but not decreased (for - example, a node that is deactivated with the Pause intent can be deactivated further with - Restart, but not the other way around. Nodes may be reactivated using the Activate a node - operation any time after they are deactivated. If the deactivation is not complete, this will - cancel the deactivation. A node that goes down and comes back up while deactivated will still - need to be reactivated before services will be placed on that node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param deactivation_intent: Describes the intent or reason for deactivating the node. The - possible values are following. 
- :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _deactivation_intent_description = _models.DeactivationIntentDescription(deactivation_intent=deactivation_intent) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.disable_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_deactivation_intent_description, 'DeactivationIntentDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: 
- map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'} # type: ignore - - def enable_node( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Activate a Service Fabric cluster node that is currently deactivated. - - Activates a Service Fabric cluster node that is currently deactivated. Once activated, the node - will again become a viable target for placing new replicas, and any deactivated replicas - remaining on the node will be reactivated. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.enable_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - enable_node.metadata = {'url': '/Nodes/{nodeName}/$/Activate'} # type: ignore - - def remove_node_state( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Notifies Service Fabric that the persisted state on a node has been permanently removed or lost. - - This implies that it is not possible to recover the persisted state of that node. This - generally happens if a hard disk has been wiped clean, or if a hard disk crashes. The node has - to be down for this operation to be successful. This operation lets Service Fabric know that - the replicas on that node no longer exist, and that Service Fabric should stop waiting for - those replicas to come back up. Do not run this cmdlet if the state on the node has not been - removed and the node can come back up with its state intact. Starting from Service Fabric 6.5, - in order to use this API for seed nodes, please change the seed nodes to regular (non-seed) - nodes and then invoke this API to remove the node state. If the cluster is running on Azure, - after the seed node goes down, Service Fabric will try to change it to a non-seed node - automatically. To make this happen, make sure the number of non-seed nodes in the primary node - type is no less than the number of Down seed nodes. If necessary, add more nodes to the primary - node type to achieve this. For standalone cluster, if the Down seed node is not expected to - come back up with its state intact, please remove the node from the cluster, see - https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-windows-server-add-remove-nodes. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.remove_node_state.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'} # type: ignore - - def restart_node( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - node_instance_id="0", # type: str - create_fabric_dump="False", # type: Optional[Union[str, 
"_models.CreateFabricDump"]] - **kwargs # type: Any - ): - # type: (...) -> None - """Restarts a Service Fabric cluster node. - - Restarts a Service Fabric cluster node that is already started. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param node_instance_id: The instance ID of the target node. If instance ID is specified the - node is restarted only if it matches with the current instance of the node. A default value of - "0" would match any instance ID. The instance ID can be obtained using get node query. - :type node_instance_id: str - :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is - case-sensitive. - :type create_fabric_dump: str or ~azure.servicefabric.models.CreateFabricDump - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _restart_node_description = _models.RestartNodeDescription(node_instance_id=node_instance_id, create_fabric_dump=create_fabric_dump) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.restart_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} 
# type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_restart_node_description, 'RestartNodeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'} # type: ignore - - def remove_configuration_overrides( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes configuration overrides on the specified node. - - This api allows removing all existing configuration overrides on specified node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.remove_configuration_overrides.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - remove_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/RemoveConfigurationOverrides'} # type: ignore - - def get_configuration_overrides( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: 
(...) -> List["_models.ConfigParameterOverride"] - """Gets the list of configuration overrides on the specified node. - - This api allows getting all existing configuration overrides on the specified node. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ConfigParameterOverride, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ConfigParameterOverride] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConfigParameterOverride"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_configuration_overrides.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) 
- response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ConfigParameterOverride]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_configuration_overrides.metadata = {'url': '/Nodes/{nodeName}/$/GetConfigurationOverrides'} # type: ignore - - def add_configuration_parameter_overrides( - self, - node_name, # type: str - config_parameter_override_list, # type: List["_models.ConfigParameterOverride"] - force=None, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Adds the list of configuration overrides on the specified node. - - This api allows adding all existing configuration overrides on the specified node. - - :param node_name: The name of the node. - :type node_name: str - :param config_parameter_override_list: Description for adding list of configuration overrides. - :type config_parameter_override_list: list[~azure.servicefabric.models.ConfigParameterOverride] - :param force: Force adding configuration overrides on specified nodes. - :type force: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.add_configuration_parameter_overrides.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force is not None: - query_parameters['Force'] = self._serialize.query("force", force, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(config_parameter_override_list, '[ConfigParameterOverride]') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - add_configuration_parameter_overrides.metadata = {'url': '/Nodes/{nodeName}/$/AddConfigurationParameterOverrides'} # type: ignore - - def remove_node_tags( - self, - node_name, # type: str - node_tags, # type: List[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes the list of tags from the specified node. - - This api allows removing set of tags from the specified node. - - :param node_name: The name of the node. - :type node_name: str - :param node_tags: Description for adding list of node tags. - :type node_tags: list[str] - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.remove_node_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(node_tags, '[str]') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - remove_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeTags'} # type: ignore - - def add_node_tags( - self, - node_name, # type: str - node_tags, # type: List[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Adds the list of tags on the specified node. - - This api allows adding tags to the specified node. - - :param node_name: The name of the node. - :type node_name: str - :param node_tags: Description for adding list of node tags. 
- :type node_tags: list[str] - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.add_node_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(node_tags, '[str]') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - add_node_tags.metadata = {'url': '/Nodes/{nodeName}/$/AddNodeTags'} # 
type: ignore - - def get_application_type_info_list( - self, - application_type_definition_kind_filter=0, # type: Optional[int] - exclude_application_parameters=False, # type: Optional[bool] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedApplicationTypeInfoList" - """Gets the list of application types in the Service Fabric cluster. - - Returns the information about the application types that are provisioned or in the process of - being provisioned in the Service Fabric cluster. Each version of an application type is - returned as one application type. The response includes the name, version, status, and other - details about the application type. This is a paged query, meaning that if not all of the - application types fit in a page, one page of results is returned as well as a continuation - token, which can be used to get the next page. For example, if there are 10 application types - but a page only fits the first three application types, or if max results is set to 3, then - three is returned. To access the rest of the results, retrieve subsequent pages by using the - returned continuation token in the next query. An empty continuation token is returned if there - are no subsequent pages. - - :param application_type_definition_kind_filter: Used to filter on ApplicationTypeDefinitionKind - which is the mechanism used to define a Service Fabric application type. - - - * Default - Default value, which performs the same function as selecting "All". The value is - 0. - * All - Filter that matches input with any ApplicationTypeDefinitionKind value. The value is - 65535. - * ServiceFabricApplicationPackage - Filter that matches input with - ApplicationTypeDefinitionKind value ServiceFabricApplicationPackage. The value is 1. - * Compose - Filter that matches input with ApplicationTypeDefinitionKind value Compose. 
The - value is 2. - :type application_type_definition_kind_filter: int - :param exclude_application_parameters: The flag that specifies whether application parameters - will be excluded from the result. - :type exclude_application_parameters: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedApplicationTypeInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationTypeInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_type_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_type_definition_kind_filter is not None: - query_parameters['ApplicationTypeDefinitionKindFilter'] = self._serialize.query("application_type_definition_kind_filter", application_type_definition_kind_filter, 'int') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_type_info_list.metadata = {'url': '/ApplicationTypes'} # type: ignore - - def get_application_type_info_list_by_name( - self, - application_type_name, # type: str - application_type_version=None, # type: Optional[str] - exclude_application_parameters=False, # type: Optional[bool] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedApplicationTypeInfoList" - """Gets the list of application types in the Service Fabric cluster matching exactly the specified name. - - Returns the information about the application types that are provisioned or in the process of - being provisioned in the Service Fabric cluster. These results are of application types whose - name match exactly the one specified as the parameter, and which comply with the given query - parameters. All versions of the application type matching the application type name are - returned, with each version returned as one application type. The response includes the name, - version, status, and other details about the application type. This is a paged query, meaning - that if not all of the application types fit in a page, one page of results is returned as well - as a continuation token, which can be used to get the next page. 
For example, if there are 10 - application types but a page only fits the first three application types, or if max results is - set to 3, then three is returned. To access the rest of the results, retrieve subsequent pages - by using the returned continuation token in the next query. An empty continuation token is - returned if there are no subsequent pages. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param exclude_application_parameters: The flag that specifies whether application parameters - will be excluded from the result. - :type exclude_application_parameters: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedApplicationTypeInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedApplicationTypeInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationTypeInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_type_info_list_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_type_version is not None: - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - 
header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedApplicationTypeInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'} # type: ignore - - def provision_application_type( - self, - provision_application_type_description_base_required_body_param, # type: "_models.ProvisionApplicationTypeDescriptionBase" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Provisions or registers a Service Fabric application type with the cluster using the '.sfpkg' package in the external store or using the application package in the image store. - - Provisions a Service Fabric application type with the cluster. The provision is required before - any new applications can be instantiated. - The provision operation can be performed either on the application package specified by the - relativePathInImageStore, or by using the URI of the external '.sfpkg'. - - :param provision_application_type_description_base_required_body_param: The base type of - provision application type description which supports either image store-based provision or - external store-based provision. 
- :type provision_application_type_description_base_required_body_param: ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.provision_application_type.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(provision_application_type_description_base_required_body_param, 'ProvisionApplicationTypeDescriptionBase') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'} # type: ignore - - def unprovision_application_type( - self, - application_type_name, # type: str - application_type_version, # type: str - timeout=60, # type: Optional[int] - async_parameter=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes or unregisters a Service Fabric application type from the cluster. - - This operation can only be performed if all application instances of the application type have - been deleted. Once the application type is unregistered, no new application instances can be - created for this particular application type. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type as defined in the - application manifest. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param async_parameter: The flag indicating whether or not unprovision should occur - asynchronously. When set to true, the unprovision operation returns when the request is - accepted by the system, and the unprovision operation continues without any timeout limit. The - default value is false. However, we recommend setting it to true for large application packages - that were provisioned. 
- :type async_parameter: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _unprovision_application_type_description_info = _models.UnprovisionApplicationTypeDescriptionInfo(application_type_version=application_type_version, async_property=async_parameter) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.unprovision_application_type.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_unprovision_application_type_description_info, 'UnprovisionApplicationTypeDescriptionInfo') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'} # type: ignore - - def get_service_type_info_list( - self, - application_type_name, # type: str - application_type_version, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ServiceTypeInfo"] - """Gets the list containing the information about service types that are supported by a provisioned application type in a Service Fabric cluster. - - Gets the list containing the information about service types that are supported by a - provisioned application type in a Service Fabric cluster. The provided application type must - exist. Otherwise, a 404 status is returned. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ServiceTypeInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ServiceTypeInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceTypeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_type_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ServiceTypeInfo]', 
pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'} # type: ignore - - def get_service_type_info_by_name( - self, - application_type_name, # type: str - application_type_version, # type: str - service_type_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.ServiceTypeInfo"] - """Gets the information about a specific service type that is supported by a provisioned application type in a Service Fabric cluster. - - Gets the information about a specific service type that is supported by a provisioned - application type in a Service Fabric cluster. The provided application type must exist. - Otherwise, a 404 status is returned. A 204 response is returned if the specified service type - is not found in the cluster. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param service_type_name: Specifies the name of a Service Fabric service type. - :type service_type_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceTypeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceTypeInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceTypeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_type_info_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceTypeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore - - def get_service_manifest( - self, - application_type_name, # type: str - application_type_version, # type: str - service_manifest_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceTypeManifest" - """Gets the manifest describing a service type. - - Gets the manifest describing a service type. The response contains the service manifest XML as - a string. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceTypeManifest, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceTypeManifest - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceTypeManifest"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_manifest.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceTypeManifest', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'} # type: ignore - - def get_deployed_service_type_info_list( - self, - node_name, # type: str - application_id, # type: str - service_manifest_name=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.DeployedServiceTypeInfo"] - """Gets the list containing the information about service types from the applications deployed on a node in a Service Fabric cluster. - - Gets the list containing the information about service types from the applications deployed on - a node in a Service Fabric cluster. The response includes the name of the service type, its - registration status, the code package that registered it and activation ID of the service - package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of the service manifest to filter the list of deployed - service type information. If specified, the response will only contain the information about - service types that are defined in this service manifest. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServiceTypeInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServiceTypeInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_type_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in 
[200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'} # type: ignore - - def get_deployed_service_type_info_by_name( - self, - node_name, # type: str - application_id, # type: str - service_type_name, # type: str - service_manifest_name=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional[List["_models.DeployedServiceTypeInfo"]] - """Gets the information about a specified service type of the application deployed on a node in a Service Fabric cluster. - - Gets the list containing the information about a specific service type from the applications - deployed on a node in a Service Fabric cluster. The response includes the name of the service - type, its registration status, the code package that registered it and activation ID of the - service package. Each entry represents one activation of a service type, differentiated by the - activation ID. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_type_name: Specifies the name of a Service Fabric service type. 
- :type service_type_name: str - :param service_manifest_name: The name of the service manifest to filter the list of deployed - service type information. If specified, the response will only contain the information about - service types that are defined in this service manifest. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServiceTypeInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServiceTypeInfo] or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceTypeInfo"]]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_type_info_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if 
timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceTypeInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'} # type: ignore - - def create_application( - self, - application_description, # type: "_models.ApplicationDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a Service Fabric application. - - Creates a Service Fabric application using the specified description. - - :param application_description: Description for creating an application. - :type application_description: ~azure.servicefabric.models.ApplicationDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_application.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_description, 'ApplicationDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_application.metadata = {'url': '/Applications/$/Create'} # 
type: ignore - - def delete_application( - self, - application_id, # type: str - force_remove=None, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes an existing Service Fabric application. - - An application must be created before it can be deleted. Deleting an application will delete - all services that are part of that application. By default, Service Fabric will try to close - service replicas in a graceful manner and then delete the service. However, if a service is - having issues closing the replica gracefully, the delete operation may take a long time or get - stuck. Use the optional ForceRemove flag to skip the graceful close sequence and forcefully - delete the application and all of its services. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param force_remove: Remove a Service Fabric application or service forcefully without going - through the graceful shutdown sequence. This parameter can be used to forcefully delete an - application or service for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_application.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'} # type: ignore - - def 
get_application_load_info( - self, - application_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.ApplicationLoadInfo"] - """Gets load information about a Service Fabric application. - - Returns the load information about the application that was created or in the process of being - created in the Service Fabric cluster and whose name matches the one specified as the - parameter. The response includes the name, minimum nodes, maximum nodes, the number of nodes - the application is occupying currently, and application load metric information about the - application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationLoadInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationLoadInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationLoadInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_load_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationLoadInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
{}) - - return deserialized - get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'} # type: ignore - - def get_application_info_list( - self, - application_definition_kind_filter=0, # type: Optional[int] - application_type_name=None, # type: Optional[str] - exclude_application_parameters=False, # type: Optional[bool] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedApplicationInfoList" - """Gets the list of applications created in the Service Fabric cluster that match the specified filters. - - Gets the information about the applications that were created or in the process of being - created in the Service Fabric cluster and match the specified filters. The response includes - the name, type, status, parameters, and other details about the application. If the - applications do not fit in a page, one page of results is returned as well as a continuation - token, which can be used to get the next page. Filters ApplicationTypeName and - ApplicationDefinitionKindFilter cannot be specified at the same time. - - :param application_definition_kind_filter: Used to filter on ApplicationDefinitionKind, which - is the mechanism used to define a Service Fabric application. - - - * Default - Default value, which performs the same function as selecting "All". The value is - 0. - * All - Filter that matches input with any ApplicationDefinitionKind value. The value is - 65535. - * ServiceFabricApplicationDescription - Filter that matches input with - ApplicationDefinitionKind value ServiceFabricApplicationDescription. The value is 1. - * Compose - Filter that matches input with ApplicationDefinitionKind value Compose. The value - is 2. - :type application_definition_kind_filter: int - :param application_type_name: The application type name used to filter the applications to - query for. 
This value should not contain the application type version. - :type application_type_name: str - :param exclude_application_parameters: The flag that specifies whether application parameters - will be excluded from the result. - :type exclude_application_parameters: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedApplicationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedApplicationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedApplicationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_info_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if application_definition_kind_filter is not None: - query_parameters['ApplicationDefinitionKindFilter'] = self._serialize.query("application_definition_kind_filter", application_definition_kind_filter, 'int') - if application_type_name is not None: - query_parameters['ApplicationTypeName'] = self._serialize.query("application_type_name", application_type_name, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedApplicationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_info_list.metadata = {'url': '/Applications'} # type: ignore - - def get_application_info( - self, - application_id, # type: str - exclude_application_parameters=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.ApplicationInfo"] - """Gets information about a Service Fabric application. - - Returns the information about the application that was created or in the process of being - created in the Service Fabric cluster and whose name matches the one specified as the - parameter. The response includes the name, type, status, parameters, and other details about - the application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param exclude_application_parameters: The flag that specifies whether application parameters - will be excluded from the result. 
- :type exclude_application_parameters: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if exclude_application_parameters is not None: - query_parameters['ExcludeApplicationParameters'] = self._serialize.query("exclude_application_parameters", exclude_application_parameters, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ApplicationInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_info.metadata = {'url': '/Applications/{applicationId}'} # type: ignore - - def get_application_health( - self, - application_id, # type: str - events_health_state_filter=0, # type: Optional[int] - deployed_applications_health_state_filter=0, # type: Optional[int] - services_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ApplicationHealth" - """Gets the health of the service fabric application. - - Returns the heath state of the service fabric application. The response reports either Ok, - Error or Warning health state. If the entity is not found in the health store, it will return - Error. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. 
All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param deployed_applications_health_state_filter: Allows filtering of the deployed applications - health state objects returned in the result of application health query based on their health - state. - The possible values for this parameter include integer value of one of the following health - states. Only deployed applications that match the filter will be returned. - All deployed applications are used to evaluate the aggregated health state. If not specified, - all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values, obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of deployed applications with - HealthState value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. 
- * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_applications_health_state_filter: int - :param services_health_state_filter: Allows filtering of the services health state objects - returned in the result of services health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only services that match the filter are returned. All services are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of services with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type services_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. 
- :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_applications_health_state_filter is not None: - query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') - if services_health_state_filter is not None: - query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') - if exclude_health_statistics is not None: - 
query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore - - def get_application_health_using_policy( - self, - application_id, # type: str - events_health_state_filter=0, # type: Optional[int] - deployed_applications_health_state_filter=0, # type: Optional[int] - services_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ApplicationHealth" - """Gets the health of a Service Fabric application using the specified policy. - - Gets the health of a Service Fabric application. Use EventsHealthStateFilter to filter the - collection of health events reported on the node based on the health state. 
Use - ClusterHealthPolicies to override the health policies used to evaluate the health. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param deployed_applications_health_state_filter: Allows filtering of the deployed applications - health state objects returned in the result of application health query based on their health - state. 
- The possible values for this parameter include integer value of one of the following health - states. Only deployed applications that match the filter will be returned. - All deployed applications are used to evaluate the aggregated health state. If not specified, - all entries are returned. - The state values are flag-based enumeration, so the value could be a combination of these - values, obtained using bitwise 'OR' operator. - For example, if the provided value is 6 then health state of deployed applications with - HealthState value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_applications_health_state_filter: int - :param services_health_state_filter: Allows filtering of the services health state objects - returned in the result of services health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only services that match the filter are returned. All services are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of services with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. 
- * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type services_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. 
- :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_application_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_applications_health_state_filter is not None: - query_parameters['DeployedApplicationsHealthStateFilter'] = self._serialize.query("deployed_applications_health_state_filter", deployed_applications_health_state_filter, 'int') - if services_health_state_filter is not None: - query_parameters['ServicesHealthStateFilter'] = self._serialize.query("services_health_state_filter", services_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} # type: ignore - - def report_application_health( - self, - application_id, # type: str - health_information, # type: "_models.HealthInformation" - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sends a health report on the Service Fabric application. - - Reports health state of the specified Service Fabric application. The report must contain the - information about the source of the health report and property on which it is reported. 
- The report is sent to a Service Fabric gateway Application, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, get application health and check - that the report appears in the HealthEvents section. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. 
- This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') 
- - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} # type: ignore - - def start_application_upgrade( - self, - application_id, # type: str - application_upgrade_description, # type: "_models.ApplicationUpgradeDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Starts upgrading an application in the Service Fabric cluster. - - Validates the supplied application upgrade parameters and starts upgrading the application if - the parameters are valid. - Note, `ApplicationParameter - `_\ - s are not preserved across an application upgrade. - In order to preserve current application parameters, the user should get the parameters using - `GetApplicationInfo <./GetApplicationInfo.md>`_ operation first and pass them into the upgrade - API call as shown in the example. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
- :type application_id: str - :param application_upgrade_description: Parameters for an application upgrade. - :type application_upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_upgrade_description, 
'ApplicationUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} # type: ignore - - def get_application_upgrade( - self, - application_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ApplicationUpgradeProgressInfo" - """Gets details for the latest upgrade performed on this application. - - Returns information about the state of the latest application upgrade along with details to aid - debugging application health issues. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationUpgradeProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationUpgradeProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationUpgradeProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationUpgradeProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} # type: ignore - - def update_application_upgrade( - self, - application_id, # type: str - application_upgrade_update_description, # type: "_models.ApplicationUpgradeUpdateDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates an ongoing application upgrade in the Service Fabric cluster. - - Updates the parameters of an ongoing application upgrade from the ones specified at the time of - starting the application upgrade. This may be required to mitigate stuck application upgrades - due to incorrect parameters or issues in the application to make progress. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param application_upgrade_update_description: Parameters for updating an existing application - upgrade. - :type application_upgrade_update_description: ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(application_upgrade_update_description, 'ApplicationUpgradeUpdateDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} # type: ignore - - def resume_application_upgrade( - self, - application_id, # type: str - upgrade_domain_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Resumes upgrading an application in the Service Fabric cluster. - - Resumes an unmonitored manual Service Fabric application upgrade. Service Fabric upgrades one - upgrade domain at a time. For unmonitored manual upgrades, after Service Fabric finishes an - upgrade domain, it waits for you to call this API before proceeding to the next upgrade domain. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param upgrade_domain_name: The name of the upgrade domain in which to resume the upgrade. - :type upgrade_domain_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _resume_application_upgrade_description = _models.ResumeApplicationUpgradeDescription(upgrade_domain_name=upgrade_domain_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.resume_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_resume_application_upgrade_description, 'ResumeApplicationUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code 
not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} # type: ignore - - def rollback_application_upgrade( - self, - application_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Starts rolling back the currently on-going upgrade of an application in the Service Fabric cluster. - - Starts rolling back the current application upgrade to the previous version. This API can only - be used to roll back the current in-progress upgrade that is rolling forward to new version. If - the application is not currently being upgraded use StartApplicationUpgrade API to upgrade it - to desired version, including rolling back to a previous version. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.rollback_application_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} # type: ignore - - def get_deployed_application_info_list( - self, - node_name, # type: str - timeout=60, # type: Optional[int] - 
include_health_state=False, # type: Optional[bool] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedDeployedApplicationInfoList" - """Gets the list of applications deployed on a Service Fabric node. - - Gets the list of applications deployed on a Service Fabric node. The results do not include - information about deployed system applications unless explicitly queried for by ID. Results - encompass deployed applications in active, activating, and downloading states. This query - requires that the node name corresponds to a node on the cluster. The query fails if the - provided node name does not point to any active Service Fabric nodes on the cluster. - - :param node_name: The name of the node. - :type node_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param include_health_state: Include the health state of an entity. - If this parameter is false or not specified, then the health state returned is "Unknown". - When set to true, the query goes in parallel to the node and the health system service before - the results are merged. - As a result, the query is more expensive and may take a longer time. - :type include_health_state: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedDeployedApplicationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedDeployedApplicationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedDeployedApplicationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if include_health_state is not None: - query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedDeployedApplicationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} # type: ignore - - def get_deployed_application_info( - self, - node_name, # type: str - application_id, # type: str - timeout=60, # type: Optional[int] - include_health_state=False, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.DeployedApplicationInfo"] - """Gets the information about an application deployed on a Service Fabric node. - - This query returns system application information if the application ID provided is for system - application. Results encompass deployed applications in active, activating, and downloading - states. This query requires that the node name corresponds to a node on the cluster. The query - fails if the provided node name does not point to any active Service Fabric nodes on the - cluster. - - :param node_name: The name of the node. 
- :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param include_health_state: Include the health state of an entity. - If this parameter is false or not specified, then the health state returned is "Unknown". - When set to true, the query goes in parallel to the node and the health system service before - the results are merged. - As a result, the query is more expensive and may take a longer time. 
- :type include_health_state: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedApplicationInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedApplicationInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.DeployedApplicationInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if include_health_state is not None: - query_parameters['IncludeHealthState'] = self._serialize.query("include_health_state", include_health_state, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, 
response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('DeployedApplicationInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} # type: ignore - - def get_deployed_application_health( - self, - node_name, # type: str - application_id, # type: str - events_health_state_filter=0, # type: Optional[int] - deployed_service_packages_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.DeployedApplicationHealth" - """Gets the information about health of an application deployed on a Service Fabric node. - - Gets the information about health of an application deployed on a Service Fabric node. Use - EventsHealthStateFilter to optionally filter for the collection of HealthEvent objects reported - on the deployed application based on health state. Use DeployedServicePackagesHealthStateFilter - to optionally filter for DeployedServicePackageHealth children based on health state. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. 
- The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service - package health state objects returned in the result of deployed application health query based - on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only deployed service packages that match the filter are returned. All deployed service - packages are used to evaluate the aggregated health state of the deployed application. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value can be a combination of these - values, obtained using the bitwise 'OR' operator. - For example, if the provided value is 6 then health state of service packages with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. 
Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_service_packages_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_service_packages_health_state_filter is not None: - query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", deployed_service_packages_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore - - def get_deployed_application_health_using_policy( - self, - node_name, # type: str - application_id, # type: str - events_health_state_filter=0, # type: Optional[int] - deployed_service_packages_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.DeployedApplicationHealth" - """Gets the information about health of an application deployed on a Service Fabric node. using the specified policy. - - Gets the information about health of an application deployed on a Service Fabric node using the - specified policy. Use EventsHealthStateFilter to optionally filter for the collection of - HealthEvent objects reported on the deployed application based on health state. Use - DeployedServicePackagesHealthStateFilter to optionally filter for DeployedServicePackageHealth - children based on health state. Use ApplicationHealthPolicy to optionally override the health - policies used to evaluate the health. 
This API only uses 'ConsiderWarningAsError' field of the - ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the - deployed application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
- :type events_health_state_filter: int - :param deployed_service_packages_health_state_filter: Allows filtering of the deployed service - package health state objects returned in the result of deployed application health query based - on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only deployed service packages that match the filter are returned. All deployed service - packages are used to evaluate the aggregated health state of the deployed application. - If not specified, all entries are returned. - The state values are flag-based enumeration, so the value can be a combination of these - values, obtained using the bitwise 'OR' operator. - For example, if the provided value is 6 then health state of service packages with HealthState - value of OK (2) and Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type deployed_service_packages_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedApplicationHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedApplicationHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedApplicationHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_deployed_application_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if deployed_service_packages_health_state_filter is not None: - query_parameters['DeployedServicePackagesHealthStateFilter'] = self._serialize.query("deployed_service_packages_health_state_filter", 
deployed_service_packages_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedApplicationHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} # type: ignore - - def report_deployed_application_health( - self, - node_name, # type: str - application_id, # type: str - health_information, # type: "_models.HealthInformation" - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Sends a health report on the Service Fabric application deployed on a Service Fabric node. - - Reports health state of the application deployed on a Service Fabric node. The report must - contain the information about the source of the health report and property on which it is - reported. - The report is sent to a Service Fabric gateway Service, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, get deployed application health and - check that the report appears in the HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. 
- Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_deployed_application_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = 
self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} # type: ignore - - def get_application_manifest( - self, - application_type_name, # type: str - application_type_version, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ApplicationTypeManifest" - """Gets the manifest describing an application type. - - The response contains the application manifest XML as a string. - - :param application_type_name: The name of the application type. - :type application_type_name: str - :param application_type_version: The version of the application type. - :type application_type_version: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationTypeManifest, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationTypeManifest - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationTypeManifest"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_manifest.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationTypeManifest', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} # type: ignore - - def get_service_info_list( - self, - application_id, # type: str - service_type_name=None, # type: Optional[str] - continuation_token_parameter=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedServiceInfoList" - """Gets the information about all services belonging to the application specified by the application ID. - - Returns the information about all services belonging to the application specified by the - application ID. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_type_name: The service type name used to filter the services to query for. - :type service_type_name: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedServiceInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedServiceInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedServiceInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if service_type_name is not None: - query_parameters['ServiceTypeName'] = self._serialize.query("service_type_name", service_type_name, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, 
query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedServiceInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} # type: ignore - - def get_service_info( - self, - application_id, # type: str - service_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.ServiceInfo"] - """Gets the information about the specific service belonging to the Service Fabric application. - - Returns the information about the specified service belonging to the specified Service Fabric - application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. 
- :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServiceInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - 
error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ServiceInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_info.metadata = {'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} # type: ignore - - def get_application_name_info( - self, - service_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ApplicationNameInfo" - """Gets the name of the Service Fabric application for a service. - - Gets the name of the application for the specified service. A 404 - FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the provided service ID - does not exist. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ApplicationNameInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ApplicationNameInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationNameInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_name_info.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ApplicationNameInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_name_info.metadata = {'url': 
'/Services/{serviceId}/$/GetApplicationName'} # type: ignore - - def create_service( - self, - application_id, # type: str - service_description, # type: "_models.ServiceDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates the specified Service Fabric service. - - This api allows creating a new Service Fabric stateless or stateful service under a specified - Service Fabric application. The description for creating the service includes partitioning - information and optional properties for placement and load balancing. Some of the properties - can later be modified using ``UpdateService`` API. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_description: The information necessary to create a service. - :type service_description: ~azure.servicefabric.models.ServiceDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_service.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(service_description, 'ServiceDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - 
raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} # type: ignore - - def create_service_from_template( - self, - application_id, # type: str - service_from_template_description, # type: "_models.ServiceFromTemplateDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a Service Fabric service from the service template. - - Creates a Service Fabric service from the service template defined in the application manifest. - A service template contains the properties that will be same for the service instance of the - same type. The API allows overriding the properties that are usually different for different - services of the same service type. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_from_template_description: Describes the service that needs to be created from - the template defined in the application manifest. - :type service_from_template_description: ~azure.servicefabric.models.ServiceFromTemplateDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_service_from_template.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(service_from_template_description, 'ServiceFromTemplateDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} # type: ignore - - def delete_service( - self, - service_id, # type: str - force_remove=None, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes an existing Service Fabric service. - - A service must be created before it can be deleted. By default, Service Fabric will try to - close service replicas in a graceful manner and then delete the service. However, if the - service is having issues closing the replica gracefully, the delete operation may take a long - time or get stuck. Use the optional ForceRemove flag to skip the graceful close sequence and - forcefully delete the service. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param force_remove: Remove a Service Fabric application or service forcefully without going - through the graceful shutdown sequence. This parameter can be used to forcefully delete an - application or service for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_service.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} # type: ignore - - def update_service( - self, - service_id, # 
type: str - service_update_description, # type: "_models.ServiceUpdateDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates a Service Fabric service using the specified update description. - - This API allows updating properties of a running Service Fabric service. The set of properties - that can be updated are a subset of the properties that were specified at the time of creating - the service. The current set of properties can be obtained using ``GetServiceDescription`` API. - Note that updating the properties of a running service is different than upgrading your - application using ``StartApplicationUpgrade`` API. The upgrade is a long running background - operation that involves moving the application from one version to another, one upgrade domain - at a time, whereas update applies the new properties immediately to the service. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param service_update_description: The information necessary to update a service. - :type service_update_description: ~azure.servicefabric.models.ServiceUpdateDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_service.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(service_update_description, 'ServiceUpdateDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - 
raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} # type: ignore - - def get_service_description( - self, - service_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceDescription" - """Gets the description of an existing Service Fabric service. - - Gets the description of an existing Service Fabric service. A service must be created before - its description can be obtained. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_description.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_description.metadata = {'url': 
'/Services/{serviceId}/$/GetDescription'} # type: ignore - - def get_service_health( - self, - service_id, # type: str - events_health_state_filter=0, # type: Optional[int] - partitions_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceHealth" - """Gets the health of the specified Service Fabric service. - - Gets the health information of the specified service. - Use EventsHealthStateFilter to filter the collection of health events reported on the service - based on the health state. - Use PartitionsHealthStateFilter to filter the collection of partitions returned. - If you specify a service that does not exist in the health store, this request returns an - error. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. 
- * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param partitions_health_state_filter: Allows filtering of the partitions health state objects - returned in the result of service health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only partitions that match the filter are returned. All partitions are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these value - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of partitions with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type partitions_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. 
- The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_health.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if partitions_health_state_filter is not None: - query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore - - def get_service_health_using_policy( - self, - service_id, # type: str - events_health_state_filter=0, # type: Optional[int] - partitions_health_state_filter=0, # type: Optional[int] - exclude_health_statistics=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceHealth" - """Gets the health of the specified Service Fabric service, by using the specified health policy. - - Gets the health information of the specified service. - If the application health policy is specified, the health evaluation uses it to get the - aggregated health state. - If the policy is not specified, the health evaluation uses the application health policy - defined in the application manifest, or the default health policy, if no policy is defined in - the manifest. - Use EventsHealthStateFilter to filter the collection of health events reported on the service - based on the health state. 
- Use PartitionsHealthStateFilter to filter the collection of partitions returned. - If you specify a service that does not exist in the health store, this request returns an - error. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. 
- :type events_health_state_filter: int - :param partitions_health_state_filter: Allows filtering of the partitions health state objects - returned in the result of service health query based on their health state. - The possible values for this parameter include integer value of one of the following health - states. - Only partitions that match the filter are returned. All partitions are used to evaluate the - aggregated health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these value - obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health - state of partitions with HealthState value of OK (2) and Warning (4) will be returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type partitions_health_state_filter: int - :param exclude_health_statistics: Indicates whether the health statistics should be returned as - part of the query result. False by default. - The statistics show the number of children entities in health state Ok, Warning, and Error. - :type exclude_health_statistics: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_service_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if partitions_health_state_filter is not None: - query_parameters['PartitionsHealthStateFilter'] = self._serialize.query("partitions_health_state_filter", partitions_health_state_filter, 'int') - if exclude_health_statistics is not None: - query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ServiceHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} # type: ignore - - def report_service_health( - self, - service_id, # type: str - health_information, # type: "_models.HealthInformation" - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sends a health report on the Service Fabric service. - - Reports health state of the specified Service Fabric service. The report must contain the - information about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway Service, which forwards to the health store. 
- The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetServiceHealth and check that - the report appears in the HealthEvents section. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. 
- This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_service_health.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'} # type: ignore - - def resolve_service( - self, - service_id, # type: str - partition_key_type=None, # type: Optional[int] - partition_key_value=None, # type: Optional[str] - previous_rsp_version=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ResolvedServicePartition" - """Resolve a Service Fabric partition. - - Resolve a Service Fabric service partition to get the endpoints of the service replicas. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_key_type: Key type for the partition. This parameter is required if the - partition scheme for the service is Int64Range or Named. The possible values are following. - - - * None (1) - Indicates that the PartitionKeyValue parameter is not specified. 
This is valid - for the partitions with partitioning scheme as Singleton. This is the default value. The value - is 1. - * Int64Range (2) - Indicates that the PartitionKeyValue parameter is an int64 partition key. - This is valid for the partitions with partitioning scheme as Int64Range. The value is 2. - * Named (3) - Indicates that the PartitionKeyValue parameter is a name of the partition. This - is valid for the partitions with partitioning scheme as Named. The value is 3. - :type partition_key_type: int - :param partition_key_value: Partition key. This is required if the partition scheme for the - service is Int64Range or Named. - This is not the partition ID, but rather, either the integer key value, or the name of the - partition ID. - For example, if your service is using ranged partitions from 0 to 10, then they - PartitionKeyValue would be an - integer in that range. Query service description to see the range or name. - :type partition_key_value: str - :param previous_rsp_version: The value in the Version field of the response that was received - previously. This is required if the user knows that the result that was gotten previously is - stale. - :type previous_rsp_version: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ResolvedServicePartition, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ResolvedServicePartition - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ResolvedServicePartition"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.resolve_service.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_key_type is not None: - query_parameters['PartitionKeyType'] = self._serialize.query("partition_key_type", partition_key_type, 'int') - if partition_key_value is not None: - query_parameters['PartitionKeyValue'] = self._serialize.query("partition_key_value", partition_key_value, 'str', skip_quote=True) - if previous_rsp_version is not None: - query_parameters['PreviousRspVersion'] = self._serialize.query("previous_rsp_version", previous_rsp_version, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ResolvedServicePartition', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'} # type: ignore - - def get_unplaced_replica_information( - self, - service_id, # type: str - partition_id=None, # type: Optional[str] - only_query_primaries=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.UnplacedReplicaInformation" - """Gets the information about unplaced replica of the service. - - Returns the information about the unplaced replicas of the service. - If PartitionId is specified, then result will contain information only about unplaced replicas - for that partition. - If PartitionId is not specified, then result will contain information about unplaced replicas - for all partitions of that service. - If OnlyQueryPrimaries is set to true, then result will contain information only about primary - replicas, and will ignore unplaced secondary replicas. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. 
- :type partition_id: str - :param only_query_primaries: Indicates that unplaced replica information will be queries only - for primary replicas. - :type only_query_primaries: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UnplacedReplicaInformation, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UnplacedReplicaInformation - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UnplacedReplicaInformation"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_unplaced_replica_information.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_id is not None: - query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') - if only_query_primaries is not None: - query_parameters['OnlyQueryPrimaries'] = self._serialize.query("only_query_primaries", only_query_primaries, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] 
    def get_loaded_partition_info_list(
        self,
        metric_name,  # type: str
        service_name=None,  # type: Optional[str]
        ordering=None,  # type: Optional[Union[str, "_models.Ordering"]]
        max_results=0,  # type: Optional[int]
        continuation_token_parameter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.LoadedPartitionInformationResultList"
        """Gets ordered list of partitions.

        Retrieves partitions which are most/least loaded according to specified metric.

        :param metric_name: Name of the metric based on which to get ordered list of partitions.
        :type metric_name: str
        :param service_name: The name of a service.
        :type service_name: str
        :param ordering: Ordering of partitions' load.
        :type ordering: str or ~azure.servicefabric.models.Ordering
        :param max_results: The maximum number of results to be returned as part of the paged queries.
         This parameter defines the upper bound on the number of results returned. The results returned
         can be less than the specified maximum results if they do not fit in the message as per the max
         message size restrictions defined in the configuration. If this parameter is zero or not
         specified, the paged query includes as many results as possible that fit in the return message.
        :type max_results: long
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LoadedPartitionInformationResultList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.LoadedPartitionInformationResultList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LoadedPartitionInformationResultList"]
        # Map auth/not-found/conflict status codes to azure-core exception types;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # pinned REST API version for this generated client
        accept = "application/json"

        # Construct URL (no path parameters for this cluster-level endpoint)
        url = self.get_loaded_partition_info_list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['MetricName'] = self._serialize.query("metric_name", metric_name, 'str')
        if service_name is not None:
            query_parameters['ServiceName'] = self._serialize.query("service_name", service_name, 'str')
        if ordering is not None:
            query_parameters['Ordering'] = self._serialize.query("ordering", ordering, 'str')
        if max_results is not None:
            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
        if continuation_token_parameter is not None:
            # skip_quote: the continuation token must be sent un-encoded per the service contract
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # failsafe_deserialize: best-effort parse of the FabricError body; never raises on bad payloads
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('LoadedPartitionInformationResultList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_loaded_partition_info_list.metadata = {'url': '/$/GetLoadedPartitionInfoList'}  # type: ignore
    def get_partition_info_list(
        self,
        service_id,  # type: str
        continuation_token_parameter=None,  # type: Optional[str]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PagedServicePartitionInfoList"
        """Gets the list of partitions of a Service Fabric service.

        The response includes the partition ID, partitioning scheme information, keys supported by the
        partition, status, health, and other details about the partition.

        :param service_id: The identity of the service. This ID is typically the full name of the
         service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be
         "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param continuation_token_parameter: The continuation token parameter is used to obtain next
         set of results. A continuation token with a non-empty value is included in the response of the
         API when the results from the system do not fit in a single response. When this value is passed
         to the next API call, the API returns next set of results. If there are no further results,
         then the continuation token does not contain a value. The value of this parameter should not be
         URL encoded.
        :type continuation_token_parameter: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PagedServicePartitionInfoList, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PagedServicePartitionInfoList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedServicePartitionInfoList"]
        # Standard status-code -> exception mapping, extendable via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # pinned REST API version for this generated client
        accept = "application/json"

        # Construct URL
        url = self.get_partition_info_list.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: serviceId may contain '~' hierarchy delimiters that must not be escaped
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if continuation_token_parameter is not None:
            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True)
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PagedServicePartitionInfoList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_info_list.metadata = {'url': '/Services/{serviceId}/$/GetPartitions'}  # type: ignore
    def get_partition_info(
        self,
        partition_id,  # type: str
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ServicePartitionInfo"]
        """Gets the information about a Service Fabric partition.

        Gets the information about the specified partition. The response includes the partition ID,
        partitioning scheme information, keys supported by the partition, status, health, and other
        details about the partition.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServicePartitionInfo, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.ServicePartitionInfo or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ServicePartitionInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # pinned REST API version for this generated client
        accept = "application/json"

        # Construct URL
        url = self.get_partition_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 (no content) is a valid success response here; the method then returns None.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ServicePartitionInfo', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_info.metadata = {'url': '/Partitions/{partitionId}'}  # type: ignore
return deserialized - get_partition_info.metadata = {'url': '/Partitions/{partitionId}'} # type: ignore - - def get_service_name_info( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ServiceNameInfo" - """Gets the name of the Service Fabric service for a partition. - - Gets name of the service for the specified partition. A 404 error is returned if the partition - ID does not exist in the cluster. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ServiceNameInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ServiceNameInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceNameInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_name_info.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - 
    def get_partition_health(
        self,
        partition_id,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        replicas_health_state_filter=0,  # type: Optional[int]
        exclude_health_statistics=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PartitionHealth"
        """Gets the health of the specified Service Fabric partition.

        Use EventsHealthStateFilter to filter the collection of health events reported on the service
        based on the health state.
        Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the
        partition.
        If you specify a partition that does not exist in the health store, this request returns an
        error.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state. Only events that match the filter are returned; all events
         are still used to evaluate the aggregated health state. The state values are a flag-based
         enumeration combinable with bitwise 'OR': Default (0, matches any), None (1, matches none),
         Ok (2), Warning (4), Error (8), All (65535). For example, 6 returns events with HealthState
         Ok or Warning. If not specified, all entries are returned.
        :type events_health_state_filter: int
        :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState
         objects on the partition, using the same flag-based enumeration of HealthStateFilter values
         as ``events_health_state_filter``. Only replicas that match the filter are returned; all
         replicas are still used to evaluate the aggregated health state. If not specified, all
         entries are returned.
        :type replicas_health_state_filter: int
        :param exclude_health_statistics: Indicates whether the health statistics should be returned as
         part of the query result. False by default.
         The statistics show the number of children entities in health state Ok, Warning, and Error.
        :type exclude_health_statistics: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PartitionHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PartitionHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PartitionHealth"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # pinned REST API version for this generated client
        accept = "application/json"

        # Construct URL
        url = self.get_partition_health.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if replicas_health_state_filter is not None:
            query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int')
        if exclude_health_statistics is not None:
            query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PartitionHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'}  # type: ignore
    def get_partition_health_using_policy(
        self,
        partition_id,  # type: str
        events_health_state_filter=0,  # type: Optional[int]
        replicas_health_state_filter=0,  # type: Optional[int]
        exclude_health_statistics=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        application_health_policy=None,  # type: Optional["_models.ApplicationHealthPolicy"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PartitionHealth"
        """Gets the health of the specified Service Fabric partition, by using the specified health policy.

        Gets the health information of the specified partition.
        If the application health policy is specified, the health evaluation uses it to get the
        aggregated health state.
        If the policy is not specified, the health evaluation uses the application health policy
        defined in the application manifest, or the default health policy, if no policy is defined in
        the manifest.
        Use EventsHealthStateFilter to filter the collection of health events reported on the partition
        based on the health state.
        Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the
        partition. Use ApplicationHealthPolicy in the POST body to override the health policies used to
        evaluate the health.
        If you specify a partition that does not exist in the health store, this request returns an
        error.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param events_health_state_filter: Allows filtering the collection of HealthEvent objects
         returned based on health state. Only events that match the filter are returned; all events
         are still used to evaluate the aggregated health state. The state values are a flag-based
         enumeration combinable with bitwise 'OR': Default (0, matches any), None (1, matches none),
         Ok (2), Warning (4), Error (8), All (65535). For example, 6 returns events with HealthState
         Ok or Warning. If not specified, all entries are returned.
        :type events_health_state_filter: int
        :param replicas_health_state_filter: Allows filtering the collection of ReplicaHealthState
         objects on the partition, using the same flag-based enumeration of HealthStateFilter values
         as ``events_health_state_filter``. Only replicas that match the filter are returned; all
         replicas are still used to evaluate the aggregated health state. If not specified, all
         entries are returned.
        :type replicas_health_state_filter: int
        :param exclude_health_statistics: Indicates whether the health statistics should be returned as
         part of the query result. False by default.
         The statistics show the number of children entities in health state Ok, Warning, and Error.
        :type exclude_health_statistics: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :param application_health_policy: Describes the health policies used to evaluate the health of
         an application or one of its children.
         If not present, the health evaluation uses the health policy from application manifest or the
         default health policy.
        :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PartitionHealth, or the result of cls(response)
        :rtype: ~azure.servicefabric.models.PartitionHealth
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PartitionHealth"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # pinned REST API version for this generated client
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.get_partition_health_using_policy.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if events_health_state_filter is not None:
            query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int')
        if replicas_health_state_filter is not None:
            query_parameters['ReplicasHealthStateFilter'] = self._serialize.query("replicas_health_state_filter", replicas_health_state_filter, 'int')
        if exclude_health_statistics is not None:
            query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The health policy override is optional; a POST with an empty body uses the
        # manifest-defined (or default) policy on the server side.
        body_content_kwargs = {}  # type: Dict[str, Any]
        if application_health_policy is not None:
            body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('PartitionHealth', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'}  # type: ignore
    def report_partition_health(
        self,
        partition_id,  # type: str
        health_information,  # type: "_models.HealthInformation"
        immediate=False,  # type: Optional[bool]
        timeout=60,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Sends a health report on the Service Fabric partition.

        Reports health state of the specified Service Fabric partition. The report must contain the
        information about the source of the health report and property on which it is reported.
        The report is sent to a Service Fabric gateway Partition, which forwards to the health store.
        The report may be accepted by the gateway, but rejected by the health store after extra
        validation.
        For example, the health store may reject the report because of an invalid parameter, like a
        stale sequence number.
        To see whether the report was applied in the health store, run GetPartitionHealth and check
        that the report appears in the HealthEvents section.

        :param partition_id: The identity of the partition.
        :type partition_id: str
        :param health_information: Describes the health information for the health report. This
         information needs to be present in all of the health reports sent to the health manager.
        :type health_information: ~azure.servicefabric.models.HealthInformation
        :param immediate: A flag that indicates whether the report should be sent immediately.
         A health report is sent to a Service Fabric gateway Application, which forwards to the health
         store.
         If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health
         store, regardless of the fabric client settings that the HTTP Gateway Application is using.
         This is useful for critical reports that should be sent as soon as possible.
         Depending on timing and other conditions, sending the report may still fail, for example if
         the HTTP Gateway is closed or the message doesn't reach the Gateway.
         If Immediate is set to false, the report is sent based on the health client settings from the
         HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval
         configuration.
         This is the recommended setting because it allows the health client to optimize health
         reporting messages to health store as well as health report processing.
         By default, reports are not sent immediately.
        :type immediate: bool
        :param timeout: The server timeout for performing the operation in seconds. This timeout
         specifies the time duration that the client is willing to wait for the requested operation to
         complete. The default value for this parameter is 60 seconds.
        :type timeout: long
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "8.0"  # pinned REST API version for this generated client
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.report_partition_health.metadata['url']  # type: ignore
        path_format_arguments = {
            'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if immediate is not None:
            query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # health_information is a required body; serialization raises on invalid models.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(health_information, 'HealthInformation')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.FabricError, response)
            raise HttpResponseError(response=response, model=error)

        # No response body on success; only invoke the custom callback if provided.
        if cls:
            return cls(pipeline_response, None, {})

    report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'}  # type: ignore
body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'} # type: ignore - - def get_partition_load_information( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PartitionLoadInformation" - """Gets the load information of the specified Service Fabric partition. - - Returns information about the load of a specified partition. - The response includes a list of load reports for a Service Fabric partition. - Each report includes the load metric name, value, and last reported time in UTC. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionLoadInformation, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionLoadInformation - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionLoadInformation"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_load_information.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PartitionLoadInformation', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} # type: ignore - - def reset_partition_load( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Resets the current load of a Service Fabric partition. - - Resets the current load of a Service Fabric partition to the default load for the service. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.reset_partition_load.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - reset_partition_load.metadata = {'url': '/Partitions/{partitionId}/$/ResetLoad'} # type: ignore - - def recover_partition( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Indicates to the Service Fabric cluster that it should attempt to recover a specific partition that is currently stuck in quorum loss. - - This operation should only be performed if it is known that the replicas that are down cannot - be recovered. Incorrect use of this API can cause potential data loss. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_partition.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'} # type: ignore - - def recover_service_partitions( - self, - service_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Indicates to the Service Fabric cluster that it should attempt to recover the specified service that is currently stuck in quorum loss. - - Indicates to the Service Fabric cluster that it should attempt to recover the specified service - that is currently stuck in quorum loss. This operation should only be performed if it is known - that the replicas that are down cannot be recovered. Incorrect use of this API can cause - potential data loss. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_service_partitions.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} # type: ignore - - def recover_system_partitions( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Indicates to the Service Fabric cluster that it should attempt to recover the system services that are currently stuck in quorum loss. - - Indicates to the Service Fabric cluster that it should attempt to recover the system services - that are currently stuck in quorum loss. This operation should only be performed if it is known - that the replicas that are down cannot be recovered. Incorrect use of this API can cause - potential data loss. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_system_partitions.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} # type: ignore - - def recover_all_partitions( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Indicates to the Service Fabric cluster that it should attempt to recover any services (including system services) which are currently stuck in quorum loss. - - This operation should only be performed if it is known that the replicas that are down cannot - be recovered. Incorrect use of this API can cause potential data loss. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.recover_all_partitions.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} # type: ignore - - def move_primary_replica( - self, - partition_id, # type: str - node_name=None, # type: Optional[str] - ignore_constraints=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Moves the primary replica of a partition of a stateful service. 
- - This command moves the primary replica of a partition of a stateful service, respecting all - constraints. - If NodeName parameter is specified, primary will be moved to the specified node (if constraints - allow it). - If NodeName parameter is not specified, primary replica will be moved to a random node in the - cluster. - If IgnoreConstraints parameter is specified and set to true, then primary will be moved - regardless of the constraints. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param node_name: The name of the node. - :type node_name: str - :param ignore_constraints: Ignore constraints when moving a replica or instance. If this - parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.move_primary_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if node_name is not None: - query_parameters['NodeName'] = self._serialize.query("node_name", node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, 
None, {}) - - move_primary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MovePrimaryReplica'} # type: ignore - - def move_secondary_replica( - self, - partition_id, # type: str - current_node_name, # type: str - new_node_name=None, # type: Optional[str] - ignore_constraints=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Moves the secondary replica of a partition of a stateful service. - - This command moves the secondary replica of a partition of a stateful service, respecting all - constraints. - CurrentNodeName parameter must be specified to identify the replica that is moved. - Source node name must be specified, but new node name can be omitted, and in that case replica - is moved to a random node. - If IgnoreConstraints parameter is specified and set to true, then secondary will be moved - regardless of the constraints. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param current_node_name: The name of the source node for secondary replica move. - :type current_node_name: str - :param new_node_name: The name of the target node for secondary replica or instance move. If - not specified, replica or instance is moved to a random node. - :type new_node_name: str - :param ignore_constraints: Ignore constraints when moving a replica or instance. If this - parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.move_secondary_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') - if new_node_name is not None: - query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - move_secondary_replica.metadata = {'url': '/Partitions/{partitionId}/$/MoveSecondaryReplica'} # type: ignore - - def update_partition_load( - self, - partition_metric_load_description_list, # type: List["_models.PartitionMetricLoadDescription"] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedUpdatePartitionLoadResultList" - """Update the loads of provided partitions for specific metrics. - - Updates the load value and predicted load value for all the partitions provided for specified - metrics. - - :param partition_metric_load_description_list: Description of updating load for list of - partitions. - :type partition_metric_load_description_list: list[~azure.servicefabric.models.PartitionMetricLoadDescription] - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. 
If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedUpdatePartitionLoadResultList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedUpdatePartitionLoadResultList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedUpdatePartitionLoadResultList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_partition_load.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(partition_metric_load_description_list, '[PartitionMetricLoadDescription]') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedUpdatePartitionLoadResultList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - update_partition_load.metadata = {'url': '/$/UpdatePartitionLoad'} # type: ignore - - def move_instance( - self, - service_id, # type: str - partition_id, # type: str - current_node_name=None, # type: Optional[str] - new_node_name=None, # type: Optional[str] - ignore_constraints=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Moves the instance of a partition of a stateless service. - - This command moves the instance of a partition of a stateless service, respecting all - constraints. - Partition id and service name must be specified to be able to move the instance. - CurrentNodeName when specified identifies the instance that is moved. If not specified, random - instance will be moved - New node name can be omitted, and in that case instance is moved to a random node. - If IgnoreConstraints parameter is specified and set to true, then instance will be moved - regardless of the constraints. - - :param service_id: The identity of the service. 
This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param current_node_name: The name of the source node for instance move. If not specified, - instance is moved from a random node. - :type current_node_name: str - :param new_node_name: The name of the target node for secondary replica or instance move. If - not specified, replica or instance is moved to a random node. - :type new_node_name: str - :param ignore_constraints: Ignore constraints when moving a replica or instance. If this - parameter is not specified, all constraints are honored. - :type ignore_constraints: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.move_instance.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if current_node_name is not None: - query_parameters['CurrentNodeName'] = self._serialize.query("current_node_name", current_node_name, 'str') - if new_node_name is not None: - query_parameters['NewNodeName'] = self._serialize.query("new_node_name", new_node_name, 'str') - if ignore_constraints is not None: - query_parameters['IgnoreConstraints'] = self._serialize.query("ignore_constraints", ignore_constraints, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - move_instance.metadata = {'url': '/Services/{serviceId}/$/GetPartitions/{partitionId}/$/MoveInstance'} # type: ignore - - def create_repair_task( - self, - repair_task, # type: "_models.RepairTask" - **kwargs # type: Any - ): - # type: (...) -> "_models.RepairTaskUpdateInfo" - """Creates a new repair task. - - For clusters that have the Repair Manager Service configured, - this API provides a way to create repair tasks that run automatically or manually. - For repair tasks that run automatically, an appropriate repair executor - must be running for each repair action to run automatically. - These are currently only available in specially-configured Azure Cloud Services. - - To create a manual repair task, provide the set of impacted node names and the - expected impact. When the state of the created repair task changes to approved, - you can safely perform repair actions on those nodes. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task: Describes the repair task to be created or updated. 
- :type repair_task: ~azure.servicefabric.models.RepairTask - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task, 'RepairTask') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return 
deserialized - create_repair_task.metadata = {'url': '/$/CreateRepairTask'} # type: ignore - - def cancel_repair_task( - self, - repair_task_cancel_description, # type: "_models.RepairTaskCancelDescription" - **kwargs # type: Any - ): - # type: (...) -> "_models.RepairTaskUpdateInfo" - """Requests the cancellation of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task_cancel_description: Describes the repair task to be cancelled. - :type repair_task_cancel_description: ~azure.servicefabric.models.RepairTaskCancelDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.cancel_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task_cancel_description, 'RepairTaskCancelDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, 
query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'} # type: ignore - - def delete_repair_task( - self, - task_id, # type: str - version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes a completed repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param task_id: The ID of the completed repair task to be deleted. - :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. 
- :type version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _repair_task_delete_description = _models.RepairTaskDeleteDescription(task_id=task_id, version=version) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.delete_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_repair_task_delete_description, 'RepairTaskDeleteDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'} # type: ignore - - def 
get_repair_task_list( - self, - task_id_filter=None, # type: Optional[str] - state_filter=None, # type: Optional[int] - executor_filter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> List["_models.RepairTask"] - """Gets a list of repair tasks matching the given filters. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param task_id_filter: The repair task ID prefix to be matched. - :type task_id_filter: str - :param state_filter: A bitwise-OR of the following values, specifying which task states should - be included in the result list. - - - * 1 - Created - * 2 - Claimed - * 4 - Preparing - * 8 - Approved - * 16 - Executing - * 32 - Restoring - * 64 - Completed. - :type state_filter: int - :param executor_filter: The name of the repair executor whose claimed tasks should be included - in the list. - :type executor_filter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of RepairTask, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.RepairTask] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.RepairTask"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_repair_task_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if task_id_filter is not None: - query_parameters['TaskIdFilter'] = self._serialize.query("task_id_filter", task_id_filter, 'str') - if state_filter is not None: - query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') - if 
executor_filter is not None: - query_parameters['ExecutorFilter'] = self._serialize.query("executor_filter", executor_filter, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[RepairTask]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'} # type: ignore - - def force_approve_repair_task( - self, - task_id, # type: str - version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.RepairTaskUpdateInfo" - """Forces the approval of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param task_id: The ID of the repair task. - :type task_id: str - :param version: The current version number of the repair task. If non-zero, then the request - will only succeed if this value matches the actual current version of the repair task. If zero, - then no version check is performed. 
- :type version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _repair_task_approve_description = _models.RepairTaskApproveDescription(task_id=task_id, version=version) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.force_approve_repair_task.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_repair_task_approve_description, 'RepairTaskApproveDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'} # type: ignore - - def update_repair_task_health_policy( - self, - repair_task_update_health_policy_description, # type: "_models.RepairTaskUpdateHealthPolicyDescription" - **kwargs # type: Any - ): - # type: (...) -> "_models.RepairTaskUpdateInfo" - """Updates the health policy of the given repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task_update_health_policy_description: Describes the repair task healthy policy - to be updated. - :type repair_task_update_health_policy_description: ~azure.servicefabric.models.RepairTaskUpdateHealthPolicyDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_repair_task_health_policy.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", 
accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task_update_health_policy_description, 'RepairTaskUpdateHealthPolicyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'} # type: ignore - - def update_repair_execution_state( - self, - repair_task, # type: "_models.RepairTask" - **kwargs # type: Any - ): - # type: (...) -> "_models.RepairTaskUpdateInfo" - """Updates the execution state of a repair task. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param repair_task: Describes the repair task to be created or updated. 
- :type repair_task: ~azure.servicefabric.models.RepairTask - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RepairTaskUpdateInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RepairTaskUpdateInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RepairTaskUpdateInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_repair_execution_state.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(repair_task, 'RepairTask') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RepairTaskUpdateInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return 
deserialized - update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'} # type: ignore - - def get_replica_info_list( - self, - partition_id, # type: str - continuation_token_parameter=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedReplicaInfoList" - """Gets the information about replicas of a Service Fabric service partition. - - The GetReplicas endpoint returns information about the replicas of the specified partition. The - response includes the ID, role, status, health, node name, uptime, and other details about the - replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedReplicaInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedReplicaInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedReplicaInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_replica_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PagedReplicaInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'} # type: ignore - - def get_replica_info( - self, - partition_id, # type: str - replica_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.ReplicaInfo"] - """Gets the information about a replica of a Service Fabric partition. - - The response includes the ID, role, status, health, node name, uptime, and other details about - the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ReplicaInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ReplicaInfo or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicaInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_replica_info.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ReplicaInfo', pipeline_response) - - if cls: - 
return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'} # type: ignore - - def get_replica_health( - self, - partition_id, # type: str - replica_id, # type: str - events_health_state_filter=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ReplicaHealth" - """Gets the health of a Service Fabric stateful service replica or stateless service instance. - - Gets the health of a Service Fabric replica. - Use EventsHealthStateFilter to filter the collection of health events reported on the replica - based on the health state. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. 
- * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ReplicaHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ReplicaHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicaHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_replica_health.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, 
header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ReplicaHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore - - def get_replica_health_using_policy( - self, - partition_id, # type: str - replica_id, # type: str - events_health_state_filter=0, # type: Optional[int] - timeout=60, # type: Optional[int] - application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ReplicaHealth" - """Gets the health of a Service Fabric stateful service replica or stateless service instance using the specified policy. - - Gets the health of a Service Fabric stateful service replica or stateless service instance. - Use EventsHealthStateFilter to filter the collection of health events reported on the cluster - based on the health state. - Use ApplicationHealthPolicy to optionally override the health policies used to evaluate the - health. This API only uses 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The - rest of the fields are ignored while evaluating the health of the replica. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. 
- The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. 
- :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ReplicaHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ReplicaHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicaHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_replica_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - 
body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ReplicaHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} # type: ignore - - def report_replica_health( - self, - partition_id, # type: str - replica_id, # type: str - health_information, # type: "_models.HealthInformation" - service_kind="Stateful", # type: Union[str, "_models.ReplicaHealthReportServiceKind"] - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sends a health report on the Service Fabric replica. - - Reports health state of the specified Service Fabric replica. The report must contain the - information about the source of the health report and property on which it is reported. - The report is sent to a Service Fabric gateway Replica, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, run GetReplicaHealth and check that - the report appears in the HealthEvents section. - - :param partition_id: The identity of the partition. 
- :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param service_kind: The kind of service replica (Stateless or Stateful) for which the health - is being reported. Following are the possible values. - :type service_kind: str or ~azure.servicefabric.models.ReplicaHealthReportServiceKind - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. - If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_replica_health.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} # type: ignore - - def get_deployed_service_replica_info_list( - self, - node_name, # type: str - application_id, # type: str - partition_id=None, # type: Optional[str] - service_manifest_name=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional[List["_models.DeployedServiceReplicaInfo"]] - """Gets the list of replicas deployed on a Service Fabric node. - - Gets the list containing the information about replicas deployed on a Service Fabric node. The - information include partition ID, replica ID, status of the replica, name of the service, name - of the service type, and other information. Use PartitionId or ServiceManifestName query - parameters to return information about the deployed replicas matching the specified values for - those parameters. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param partition_id: The identity of the partition. 
- :type partition_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServiceReplicaInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServiceReplicaInfo] or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServiceReplicaInfo"]]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_replica_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if partition_id is not None: - query_parameters['PartitionId'] = self._serialize.query("partition_id", partition_id, 'str') - if service_manifest_name is not None: - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 
'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServiceReplicaInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} # type: ignore - - def get_deployed_service_replica_detail_info( - self, - node_name, # type: str - partition_id, # type: str - replica_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.DeployedServiceReplicaDetailInfo" - """Gets the details of replica deployed on a Service Fabric node. - - Gets the details of the replica deployed on a Service Fabric node. The information includes - service kind, service name, current service operation, current service operation start date - time, partition ID, replica/instance ID, reported load, and other information. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_replica_detail_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} # type: ignore - - def get_deployed_service_replica_detail_info_by_partition_id( - self, - node_name, # type: str - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.DeployedServiceReplicaDetailInfo" - """Gets the details of replica deployed on a Service Fabric node. - - Gets the details of the replica deployed on a Service Fabric node. The information includes - service kind, service name, current service operation, current service operation start date - time, partition ID, replica/instance ID, reported load, and other information. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServiceReplicaDetailInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServiceReplicaDetailInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServiceReplicaDetailInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServiceReplicaDetailInfo', 
pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'} # type: ignore - - def restart_replica( - self, - node_name, # type: str - partition_id, # type: str - replica_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Restarts a service replica of a persisted service running on a node. - - Restarts a service replica of a persisted service running on a node. Warning - There are no - safety checks performed when this API is used. Incorrect use of this API can lead to - availability loss for stateful services. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.restart_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restart_replica.metadata = {'url': 
'/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} # type: ignore - - def remove_replica( - self, - node_name, # type: str - partition_id, # type: str - replica_id, # type: str - force_remove=None, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes a service replica running on a node. - - This API simulates a Service Fabric replica failure by removing a replica from a Service Fabric - cluster. The removal closes the replica, transitions the replica to the role None, and then - removes all of the state information of the replica from the cluster. This API tests the - replica state removal path, and simulates the report fault permanent path through client APIs. - Warning - There are no safety checks performed when this API is used. Incorrect use of this API - can lead to data loss for stateful services. In addition, the forceRemove flag impacts all - other replicas hosted in the same process. - - :param node_name: The name of the node. - :type node_name: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param force_remove: Remove a Service Fabric application or service forcefully without going - through the graceful shutdown sequence. This parameter can be used to forcefully delete an - application or service for which delete is timing out due to issues in the service code that - prevents graceful close of replicas. - :type force_remove: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.remove_replica.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if force_remove is not None: - query_parameters['ForceRemove'] = self._serialize.query("force_remove", force_remove, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return 
cls(pipeline_response, None, {}) - - remove_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} # type: ignore - - def get_deployed_service_package_info_list( - self, - node_name, # type: str - application_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.DeployedServicePackageInfo"] - """Gets the list of service packages deployed on a Service Fabric node. - - Returns the information about the service packages deployed on a Service Fabric node for the - given application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServicePackageInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedServicePackageInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) - - if 
cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} # type: ignore - - def get_deployed_service_package_info_list_by_name( - self, - node_name, # type: str - application_id, # type: str - service_package_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Optional[List["_models.DeployedServicePackageInfo"]] - """Gets the list of service packages deployed on a Service Fabric node matching exactly the specified name. - - Returns the information about the service packages deployed on a Service Fabric node for the - given application. These results are of service packages whose name match exactly the service - package name specified as the parameter. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedServicePackageInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedServicePackageInfo] or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional[List["_models.DeployedServicePackageInfo"]]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_info_list_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DeployedServicePackageInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'} # type: ignore - - def get_deployed_service_package_health( - self, - node_name, # type: str - application_id, # type: str - service_package_name, # type: str - events_health_state_filter=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.DeployedServicePackageHealth" - """Gets the information about health of a service package for a specific application deployed for a Service Fabric node and application. - - Gets the information about health of a service package for a specific application deployed on a - Service Fabric node. Use EventsHealthStateFilter to optionally filter for the collection of - HealthEvent objects reported on the deployed service package based on health state. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. 
- The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServicePackageHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore - - def get_deployed_service_package_health_using_policy( - self, - node_name, # type: str - application_id, # type: str - service_package_name, # type: str - events_health_state_filter=0, # type: Optional[int] - timeout=60, # type: Optional[int] - application_health_policy=None, # type: Optional["_models.ApplicationHealthPolicy"] - **kwargs # type: Any - ): - # type: (...) -> "_models.DeployedServicePackageHealth" - """Gets the information about health of service package for a specific application deployed on a Service Fabric node using the specified policy. - - Gets the information about health of a service package for a specific application deployed on a - Service Fabric node. using the specified policy. Use EventsHealthStateFilter to optionally - filter for the collection of HealthEvent objects reported on the deployed service package based - on health state. Use ApplicationHealthPolicy to optionally override the health policies used to - evaluate the health. This API only uses 'ConsiderWarningAsError' field of the - ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the - deployed service package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. 
- Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param events_health_state_filter: Allows filtering the collection of HealthEvent objects - returned based on health state. - The possible values for this parameter include integer value of one of the following health - states. - Only events that match the filter are returned. All events are used to evaluate the aggregated - health state. - If not specified, all entries are returned. The state values are flag-based enumeration, so - the value could be a combination of these values, obtained using the bitwise 'OR' operator. For - example, If the provided value is 6 then all of the events with HealthState value of OK (2) and - Warning (4) are returned. - - - * Default - Default value. Matches any HealthState. The value is zero. - * None - Filter that doesn't match any HealthState value. Used in order to return no results - on a given collection of states. The value is 1. - * Ok - Filter that matches input with HealthState value Ok. The value is 2. - * Warning - Filter that matches input with HealthState value Warning. The value is 4. - * Error - Filter that matches input with HealthState value Error. The value is 8. - * All - Filter that matches input with any HealthState value. The value is 65535. - :type events_health_state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param application_health_policy: Describes the health policies used to evaluate the health of - an application or one of its children. - If not present, the health evaluation uses the health policy from application manifest or the - default health policy. - :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy - :keyword callable cls: A custom type or function that will be passed the direct response - :return: DeployedServicePackageHealth, or the result of cls(response) - :rtype: ~azure.servicefabric.models.DeployedServicePackageHealth - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.DeployedServicePackageHealth"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_deployed_service_package_health_using_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if events_health_state_filter is not None: - query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # 
Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if application_health_policy is not None: - body_content = self._serialize.body(application_health_policy, 'ApplicationHealthPolicy') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('DeployedServicePackageHealth', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} # type: ignore - - def report_deployed_service_package_health( - self, - node_name, # type: str - application_id, # type: str - service_package_name, # type: str - health_information, # type: "_models.HealthInformation" - immediate=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sends a health report on the Service Fabric deployed service package. - - Reports health state of the service package of the application deployed on a Service Fabric - node. The report must contain the information about the source of the health report and - property on which it is reported. 
- The report is sent to a Service Fabric gateway Service, which forwards to the health store. - The report may be accepted by the gateway, but rejected by the health store after extra - validation. - For example, the health store may reject the report because of an invalid parameter, like a - stale sequence number. - To see whether the report was applied in the health store, get deployed service package health - and check that the report appears in the HealthEvents section. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_package_name: The name of the service package. - :type service_package_name: str - :param health_information: Describes the health information for the health report. This - information needs to be present in all of the health reports sent to the health manager. - :type health_information: ~azure.servicefabric.models.HealthInformation - :param immediate: A flag that indicates whether the report should be sent immediately. - A health report is sent to a Service Fabric gateway Application, which forwards to the health - store. - If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health - store, regardless of the fabric client settings that the HTTP Gateway Application is using. - This is useful for critical reports that should be sent as soon as possible. - Depending on timing and other conditions, sending the report may still fail, for example if - the HTTP Gateway is closed or the message doesn't reach the Gateway. 
- If Immediate is set to false, the report is sent based on the health client settings from the - HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval - configuration. - This is the recommended setting because it allows the health client to optimize health - reporting messages to health store as well as health report processing. - By default, reports are not sent immediately. - :type immediate: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.report_deployed_service_package_health.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - 'servicePackageName': self._serialize.url("service_package_name", service_package_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if immediate is not None: - query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 
'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(health_information, 'HealthInformation') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} # type: ignore - - def deploy_service_package_to_node( - self, - node_name, # type: str - deploy_service_package_to_node_description, # type: "_models.DeployServicePackageToNodeDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Downloads all of the code packages associated with specified service manifest on the specified node. - - This API provides a way to download code packages including the container images on a specific - node outside of the normal application deployment and upgrade path. 
This is useful for the - large code packages and container images to be present on the node before the actual - application deployment and upgrade, thus significantly reducing the total time required for the - deployment or upgrade. - - :param node_name: The name of the node. - :type node_name: str - :param deploy_service_package_to_node_description: Describes information for deploying a - service package to a Service Fabric node. - :type deploy_service_package_to_node_description: ~azure.servicefabric.models.DeployServicePackageToNodeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.deploy_service_package_to_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(deploy_service_package_to_node_description, 'DeployServicePackageToNodeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} # type: ignore - - def get_deployed_code_package_info_list( - self, - node_name, # type: str - application_id, # type: str - service_manifest_name=None, # type: Optional[str] - code_package_name=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.DeployedCodePackageInfo"] - """Gets the list of code packages deployed on a Service Fabric node. - - Gets the list of code packages deployed on a Service Fabric node for the given application. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. 
- For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in service manifest registered as - part of an application type in a Service Fabric cluster. - :type code_package_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of DeployedCodePackageInfo, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.DeployedCodePackageInfo] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DeployedCodePackageInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_deployed_code_package_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if service_manifest_name is not None: - 
query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - if code_package_name is not None: - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[DeployedCodePackageInfo]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_deployed_code_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} # type: ignore - - def restart_deployed_code_package( - self, - node_name, # type: str - application_id, # type: str - restart_deployed_code_package_description, # type: "_models.RestartDeployedCodePackageDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Restarts a code package deployed on a Service Fabric node in a cluster. - - Restarts a code package deployed on a Service Fabric node in a cluster. This aborts the code - package process, which will restart all the user service replicas hosted in that process. - - :param node_name: The name of the node. 
- :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param restart_deployed_code_package_description: Describes the deployed code package on - Service Fabric node to restart. - :type restart_deployed_code_package_description: ~azure.servicefabric.models.RestartDeployedCodePackageDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.restart_deployed_code_package.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", 
api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(restart_deployed_code_package_description, 'RestartDeployedCodePackageDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} # type: ignore - - def get_container_logs_deployed_on_node( - self, - node_name, # type: str - application_id, # type: str - service_manifest_name, # type: str - code_package_name, # type: str - tail=None, # type: Optional[str] - previous=False, # type: Optional[bool] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ContainerLogs" - """Gets the container logs for container deployed on a Service Fabric node. - - Gets the container logs for container deployed on a Service Fabric node for the given code - package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. 
This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in service manifest registered as - part of an application type in a Service Fabric cluster. - :type code_package_name: str - :param tail: Number of lines to show from the end of the logs. Default is 100. 'all' to show - the complete logs. - :type tail: str - :param previous: Specifies whether to get container logs from exited/dead containers of the - code package instance. - :type previous: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ContainerLogs, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ContainerLogs - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerLogs"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_container_logs_deployed_on_node.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - if tail is not None: - query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') - if previous is not None: - query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ContainerLogs', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} # type: ignore - - def invoke_container_api( - self, - node_name, # type: str - application_id, # type: str - service_manifest_name, # type: str - code_package_name, # type: str - code_package_instance_id, # type: str - container_api_request_body, # type: "_models.ContainerApiRequestBody" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ContainerApiResponse" - """Invoke container API on a container deployed on a Service Fabric node. - - Invoke container API on a container deployed on a Service Fabric node for the given code - package. - - :param node_name: The name of the node. - :type node_name: str - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param service_manifest_name: The name of a service manifest registered as part of an - application type in a Service Fabric cluster. - :type service_manifest_name: str - :param code_package_name: The name of code package specified in service manifest registered as - part of an application type in a Service Fabric cluster. 
- :type code_package_name: str - :param code_package_instance_id: ID that uniquely identifies a code package instance deployed - on a service fabric node. - :type code_package_instance_id: str - :param container_api_request_body: Parameters for making container API call. - :type container_api_request_body: ~azure.servicefabric.models.ContainerApiRequestBody - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ContainerApiResponse, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ContainerApiResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApiResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.invoke_container_api.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') - query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') - 
query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ContainerApiResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} # type: ignore - - def create_compose_deployment( - self, - create_compose_deployment_description, # type: "_models.CreateComposeDeploymentDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a Service Fabric compose deployment. - - Compose is a file format that describes multi-container applications. This API allows deploying - container based applications defined in compose format in a Service Fabric cluster. 
Once the - deployment is created, its status can be tracked via the ``GetComposeDeploymentStatus`` API. - - :param create_compose_deployment_description: Describes the compose deployment that needs to be - created. - :type create_compose_deployment_description: ~azure.servicefabric.models.CreateComposeDeploymentDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_compose_deployment.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(create_compose_deployment_description, 'CreateComposeDeploymentDescription') - body_content_kwargs['content'] = body_content - 
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} # type: ignore - - def get_compose_deployment_status( - self, - deployment_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ComposeDeploymentStatusInfo" - """Gets information about a Service Fabric compose deployment. - - Returns the status of the compose deployment that was created or in the process of being - created in the Service Fabric cluster and whose name matches the one specified as the - parameter. The response includes the name, status, and other details about the deployment. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ComposeDeploymentStatusInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ComposeDeploymentStatusInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentStatusInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_compose_deployment_status.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ComposeDeploymentStatusInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - 
get_compose_deployment_status.metadata = {'url': '/ComposeDeployments/{deploymentName}'} # type: ignore - - def get_compose_deployment_status_list( - self, - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedComposeDeploymentStatusInfoList" - """Gets the list of compose deployments created in the Service Fabric cluster. - - Gets the status about the compose deployments that were created or in the process of being - created in the Service Fabric cluster. The response includes the name, status, and other - details about the compose deployments. If the list of deployments do not fit in a page, one - page of results is returned as well as a continuation token, which can be used to get the next - page. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedComposeDeploymentStatusInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedComposeDeploymentStatusInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedComposeDeploymentStatusInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_compose_deployment_status_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedComposeDeploymentStatusInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} # type: ignore - - def get_compose_deployment_upgrade_progress( - self, - deployment_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ComposeDeploymentUpgradeProgressInfo" - """Gets details for the latest upgrade performed on this Service Fabric compose deployment. - - Returns the information about the state of the compose deployment upgrade along with details to - aid debugging application health issues. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ComposeDeploymentUpgradeProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ComposeDeploymentUpgradeProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ComposeDeploymentUpgradeProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_compose_deployment_upgrade_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ComposeDeploymentUpgradeProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, 
deserialized, {}) - - return deserialized - get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} # type: ignore - - def remove_compose_deployment( - self, - deployment_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes an existing Service Fabric compose deployment from cluster. - - Deletes an existing Service Fabric compose deployment. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.remove_compose_deployment.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} # type: ignore - - def start_compose_deployment_upgrade( - self, - deployment_name, # type: str - compose_deployment_upgrade_description, # type: "_models.ComposeDeploymentUpgradeDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Starts upgrading a compose deployment in the Service Fabric cluster. - - Validates the supplied upgrade parameters and starts upgrading the deployment if the parameters - are valid. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param compose_deployment_upgrade_description: Parameters for upgrading compose deployment. - :type compose_deployment_upgrade_description: ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_compose_deployment_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(compose_deployment_upgrade_description, 'ComposeDeploymentUpgradeDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} # type: ignore - - def start_rollback_compose_deployment_upgrade( - self, - deployment_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Starts rolling back a compose deployment upgrade in the Service Fabric cluster. - - Rollback a service fabric compose deployment upgrade. - - :param deployment_name: The identity of the deployment. - :type deployment_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_rollback_compose_deployment_upgrade.metadata['url'] # type: ignore - path_format_arguments = { - 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_rollback_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/RollbackUpgrade'} # type: ignore - - def get_chaos( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.Chaos" - """Get the status of Chaos. - - Get the status of Chaos indicating whether or not Chaos is running, the Chaos parameters used - for running Chaos and the status of the Chaos Schedule. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: Chaos, or the result of cls(response) - :rtype: ~azure.servicefabric.models.Chaos - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.Chaos"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_chaos.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('Chaos', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_chaos.metadata = {'url': '/Tools/Chaos'} # type: ignore - - def start_chaos( - self, - chaos_parameters, # type: "_models.ChaosParameters" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Starts Chaos in the cluster. 
- - If Chaos is not already running in the cluster, it starts Chaos with the passed in Chaos - parameters. - If Chaos is already running when this call is made, the call fails with the error code - FABRIC_E_CHAOS_ALREADY_RUNNING. - Refer to the article `Induce controlled Chaos in Service Fabric clusters - `_ for more - details. - - :param chaos_parameters: Describes all the parameters to configure a Chaos run. - :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.start_chaos.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = 
self._serialize.body(chaos_parameters, 'ChaosParameters') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'} # type: ignore - - def stop_chaos( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Stops Chaos if it is running in the cluster and put the Chaos Schedule in a stopped state. - - Stops Chaos from executing new faults. In-flight faults will continue to execute until they are - complete. The current Chaos Schedule is put into a stopped state. - Once a schedule is stopped, it will stay in the stopped state and not be used to Chaos Schedule - new runs of Chaos. A new Chaos Schedule must be set in order to resume scheduling. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.stop_chaos.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'} # type: ignore - - def get_chaos_events( - self, - continuation_token_parameter=None, # type: Optional[str] - start_time_utc=None, # type: Optional[str] - end_time_utc=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ChaosEventsSegment" - """Gets the next segment of the Chaos events based on the continuation token or the time range. - - To get the next segment of the Chaos events, you can specify the ContinuationToken. To get the - start of a new segment of Chaos events, you can specify the time range - through StartTimeUtc and EndTimeUtc. You cannot specify both the ContinuationToken and the time - range in the same call. - When there are more than 100 Chaos events, the Chaos events are returned in multiple segments - where a segment contains no more than 100 Chaos events and to get the next segment you make a - call to this API with the continuation token. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param start_time_utc: The Windows file time representing the start time of the time range for - which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method - `_.aspx) for - details. - :type start_time_utc: str - :param end_time_utc: The Windows file time representing the end time of the time range for - which a Chaos report is to be generated. Consult `DateTime.ToFileTimeUtc Method - `_.aspx) for - details. - :type end_time_utc: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. 
The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ChaosEventsSegment, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ChaosEventsSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosEventsSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_chaos_events.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if start_time_utc is not None: - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - if end_time_utc is not None: - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - 
if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ChaosEventsSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'} # type: ignore - - def get_chaos_schedule( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ChaosScheduleDescription" - """Get the Chaos Schedule defining when and how to run Chaos. - - Gets the version of the Chaos Schedule in use and the Chaos Schedule that defines when and how - to run Chaos. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ChaosScheduleDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ChaosScheduleDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ChaosScheduleDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_chaos_schedule.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ChaosScheduleDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore - - def post_chaos_schedule( - self, - timeout=60, # type: Optional[int] - version=None, # type: Optional[int] - schedule=None, # type: 
Optional["_models.ChaosSchedule"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the schedule used by Chaos. - - Chaos will automatically schedule runs based on the Chaos Schedule. - The Chaos Schedule will be updated if the provided version matches the version on the server. - When updating the Chaos Schedule, the version on the server is incremented by 1. - The version on the server will wrap back to 0 after reaching a large number. - If Chaos is running when this call is made, the call will fail. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param version: The version number of the Schedule. - :type version: int - :param schedule: Defines the schedule used by Chaos. - :type schedule: ~azure.servicefabric.models.ChaosSchedule - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _chaos_schedule = _models.ChaosScheduleDescription(version=version, schedule=schedule) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.post_chaos_schedule.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, 
minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_chaos_schedule, 'ChaosScheduleDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} # type: ignore - - def upload_file( - self, - content_path, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Uploads contents of the file to the image store. - - Uploads contents of the file to the image store. Use this API if the file is small enough to - upload again if the connection fails. The file's data needs to be added to the request body. - The contents will be uploaded to the specified path. Image store service uses a mark file to - indicate the availability of the folder. The mark file is an empty file named "_.dir". The mark - file is generated by the image store service when all files in a folder are uploaded. 
When - using File-by-File approach to upload application package in REST, the image store service - isn't aware of the file hierarchy of the application package; you need to create a mark file - per folder and upload it last, to let the image store service know that the folder is complete. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.upload_file.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - upload_file.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore - - def get_image_store_content( - self, - content_path, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ImageStoreContent" - """Gets the image store content information. - - Returns the information about the image store content at the specified contentPath. The - contentPath is relative to the root of the image store. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ImageStoreContent, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ImageStoreContent - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_content.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ImageStoreContent', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: 
ignore - - def delete_image_store_content( - self, - content_path, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes existing image store content. - - Deletes existing image store content being found within the given image store relative path. - This command can be used to delete uploaded application packages once they are provisioned. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_image_store_content.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} # type: ignore - - def get_image_store_root_content( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ImageStoreContent" - """Gets the content information at the root of the image store. - - Returns the information about the image store content at the root of the image store. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ImageStoreContent, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ImageStoreContent - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreContent"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_root_content.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ImageStoreContent', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_root_content.metadata = {'url': '/ImageStore'} # type: ignore - - def copy_image_store_content( - self, - image_store_copy_description, # type: "_models.ImageStoreCopyDescription" - timeout=60, # type: Optional[int] - **kwargs # 
type: Any - ): - # type: (...) -> None - """Copies image store content internally. - - Copies the image store content from the source image store relative path to the destination - image store relative path. - - :param image_store_copy_description: Describes the copy description for the image store. - :type image_store_copy_description: ~azure.servicefabric.models.ImageStoreCopyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.copy_image_store_content.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(image_store_copy_description, 'ImageStoreCopyDescription') - 
body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'} # type: ignore - - def delete_image_store_upload_session( - self, - session_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Cancels an image store upload session. - - The DELETE request will cause the existing upload session to expire and remove any previously - uploaded file chunks. - - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_image_store_upload_session.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'} # type: ignore - - def commit_image_store_upload_session( - self, - session_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Commit an image store upload session. 
- - When all file chunks have been uploaded, the upload session needs to be committed explicitly to - complete the upload. Image store preserves the upload session until the expiration time, which - is 30 minutes after the last chunk received. - - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.commit_image_store_upload_session.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} # type: ignore - - def get_image_store_upload_session_by_id( - self, - session_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.UploadSession" - """Get the image store upload session by ID. - - Gets the image store upload session identified by the given ID. User can query the upload - session at any time during uploading. - - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. - :type session_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UploadSession, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UploadSession - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_upload_session_by_id.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UploadSession', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} # type: ignore - - def get_image_store_upload_session_by_path( - self, - 
content_path, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.UploadSession" - """Get the image store upload session by relative path. - - Gets the image store upload session associated with the given image store relative path. User - can query the upload session at any time during uploading. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UploadSession, or the result of cls(response) - :rtype: ~azure.servicefabric.models.UploadSession - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UploadSession"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_upload_session_by_path.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", 
accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('UploadSession', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} # type: ignore - - def upload_file_chunk( - self, - content_path, # type: str - session_id, # type: str - content_range, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Uploads a file chunk to the image store relative path. - - Uploads a file chunk to the image store with the specified upload session ID and image store - relative path. This API allows user to resume the file upload operation. user doesn't have to - restart the file upload from scratch whenever there is a network interruption. Use this option - if the file size is large. - - To perform a resumable file upload, user need to break the file into multiple chunks and upload - these chunks to the image store one-by-one. Chunks don't have to be uploaded in order. If the - file represented by the image store relative path already exists, it will be overwritten when - the upload session commits. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param session_id: A GUID generated by the user for a file uploading. It identifies an image - store upload session which keeps track of all file chunks until it is committed. 
- :type session_id: str - :param content_range: When uploading file chunks to the image store, the Content-Range header - field need to be configured and sent with a request. The format should looks like "bytes - {First-Byte-Position}-{Last-Byte-Position}/{File-Length}". For example, Content-Range:bytes - 300-5000/20000 indicates that user is sending bytes 300 through 5,000 and the total file length - is 20,000 bytes. - :type content_range: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.upload_file_chunk.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['session-id'] = self._serialize.query("session_id", session_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Range'] = self._serialize.header("content_range", 
content_range, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} # type: ignore - - def get_image_store_root_folder_size( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.FolderSizeInfo" - """Get the folder size at the root of the image store. - - Returns the total size of files at the root and children folders in image store. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FolderSizeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.FolderSizeInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_root_folder_size.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('FolderSizeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_root_folder_size.metadata = {'url': '/ImageStore/$/FolderSize'} # type: ignore - - def get_image_store_folder_size( - self, - content_path, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.FolderSizeInfo" - """Get the size of a folder in image store. - - Gets the total size of file under a image store folder, specified by contentPath. The - contentPath is relative to the root of the image store. - - :param content_path: Relative path to file or folder in the image store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FolderSizeInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.FolderSizeInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FolderSizeInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_folder_size.metadata['url'] # type: ignore - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('FolderSizeInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_folder_size.metadata = {'url': '/ImageStore/{contentPath}/$/FolderSize'} # type: ignore - - def get_image_store_info( - self, - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ImageStoreInfo" - """Gets the overall ImageStore information. - - Returns information about the primary ImageStore replica, such as disk capacity and available - disk space at the node it is on, and several categories of the ImageStore's file system usage. - - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ImageStoreInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.ImageStoreInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageStoreInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_image_store_info.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ImageStoreInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_image_store_info.metadata = {'url': '/ImageStore/$/Info'} # type: ignore - - def invoke_infrastructure_command( - self, - command, # type: str - service_id=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: 
(...) -> IO - """Invokes an administrative command on the given Infrastructure Service instance. - - For clusters that have one or more instances of the Infrastructure Service configured, - this API provides a way to send infrastructure-specific commands to a particular - instance of the Infrastructure Service. - - Available commands and their corresponding response formats vary depending upon - the infrastructure on which the cluster is running. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param command: The text of the command to be invoked. The content of the command is - infrastructure-specific. - :type command: str - :param service_id: The identity of the infrastructure service. This is the full name of the - infrastructure service without the 'fabric:' URI scheme. This parameter required only for the - cluster that has more than one instance of infrastructure service running. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.invoke_infrastructure_command.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Command'] = self._serialize.query("command", command, 'str') - if service_id is not None: - query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} # type: ignore - - def invoke_infrastructure_query( - 
self, - command, # type: str - service_id=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> IO - """Invokes a read-only query on the given infrastructure service instance. - - For clusters that have one or more instances of the Infrastructure Service configured, - this API provides a way to send infrastructure-specific queries to a particular - instance of the Infrastructure Service. - - Available commands and their corresponding response formats vary depending upon - the infrastructure on which the cluster is running. - - This API supports the Service Fabric platform; it is not meant to be used directly from your - code. - - :param command: The text of the command to be invoked. The content of the command is - infrastructure-specific. - :type command: str - :param service_id: The identity of the infrastructure service. This is the full name of the - infrastructure service without the 'fabric:' URI scheme. This parameter required only for the - cluster that has more than one instance of infrastructure service running. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.invoke_infrastructure_query.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['Command'] = self._serialize.query("command", command, 'str') - if service_id is not None: - query_parameters['ServiceId'] = self._serialize.query("service_id", service_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} # type: ignore - - def start_data_loss( - self, - service_id, 
# type: str - partition_id, # type: str - operation_id, # type: str - data_loss_mode, # type: Union[str, "_models.DataLossMode"] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """This API will induce data loss for the specified partition. It will trigger a call to the OnDataLossAsync API of the partition. - - This API will induce data loss for the specified partition. It will trigger a call to the - OnDataLoss API of the partition. - Actual data loss will depend on the specified DataLossMode. - - - * PartialDataLoss - Only a quorum of replicas are removed and OnDataLoss is triggered for the - partition but actual data loss depends on the presence of in-flight replication. - * FullDataLoss - All replicas are removed hence all data is lost and OnDataLoss is triggered. - - This API should only be called with a stateful service as the target. - - Calling this API with a system service as the target is not advised. - - Note: Once this API has been called, it cannot be reversed. Calling CancelOperation will only - stop execution and clean up internal system state. - It will not restore data if the command has progressed far enough to cause data loss. - - Call the GetDataLossProgress API with the same OperationId to return information on the - operation started with this API. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. 
- :type operation_id: str - :param data_loss_mode: This enum is passed to the StartDataLoss API to indicate what type of - data loss to induce. - :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_data_loss.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['DataLossMode'] = self._serialize.query("data_loss_mode", data_loss_mode, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} # type: ignore - - def get_data_loss_progress( - self, - service_id, # type: str - partition_id, # type: str - operation_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PartitionDataLossProgress" - """Gets the progress of a partition data loss operation started using the StartDataLoss API. - - Gets the progress of a data loss operation started with StartDataLoss, using the OperationId. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionDataLossProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionDataLossProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionDataLossProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_data_loss_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PartitionDataLossProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} # type: ignore - - def start_quorum_loss( - self, - service_id, # type: str - partition_id, # type: str - operation_id, # type: str - quorum_loss_mode, # type: Union[str, "_models.QuorumLossMode"] - quorum_loss_duration, # type: int - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Induces quorum loss for a given stateful service partition. - - This API is useful for a temporary quorum loss situation on your service. - - Call the GetQuorumLossProgress API with the same OperationId to return information on the - operation started with this API. - - This can only be called on stateful persisted (HasPersistedState==true) services. Do not use - this API on stateless services or stateful in-memory only services. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param quorum_loss_mode: This enum is passed to the StartQuorumLoss API to indicate what type - of quorum loss to induce. - :type quorum_loss_mode: str or ~azure.servicefabric.models.QuorumLossMode - :param quorum_loss_duration: The amount of time for which the partition will be kept in quorum - loss. 
This must be specified in seconds. - :type quorum_loss_duration: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_quorum_loss.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['QuorumLossMode'] = self._serialize.query("quorum_loss_mode", quorum_loss_mode, 'str') - query_parameters['QuorumLossDuration'] = self._serialize.query("quorum_loss_duration", quorum_loss_duration, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, 
query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} # type: ignore - - def get_quorum_loss_progress( - self, - service_id, # type: str - partition_id, # type: str - operation_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PartitionQuorumLossProgress" - """Gets the progress of a quorum loss operation on a partition started using the StartQuorumLoss API. - - Gets the progress of a quorum loss operation started with StartQuorumLoss, using the provided - OperationId. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionQuorumLossProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionQuorumLossProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionQuorumLossProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_quorum_loss_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized 
= self._deserialize('PartitionQuorumLossProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} # type: ignore - - def start_partition_restart( - self, - service_id, # type: str - partition_id, # type: str - operation_id, # type: str - restart_partition_mode, # type: Union[str, "_models.RestartPartitionMode"] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """This API will restart some or all replicas or instances of the specified partition. - - This API is useful for testing failover. - - If used to target a stateless service partition, RestartPartitionMode must be - AllReplicasOrInstances. - - Call the GetPartitionRestartProgress API using the same OperationId to get the progress. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param restart_partition_mode: Describe which partitions to restart. - :type restart_partition_mode: str or ~azure.servicefabric.models.RestartPartitionMode - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_partition_restart.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['RestartPartitionMode'] = self._serialize.query("restart_partition_mode", restart_partition_mode, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) 
- - if cls: - return cls(pipeline_response, None, {}) - - start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} # type: ignore - - def get_partition_restart_progress( - self, - service_id, # type: str - partition_id, # type: str - operation_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PartitionRestartProgress" - """Gets the progress of a PartitionRestart operation started using StartPartitionRestart. - - Gets the progress of a PartitionRestart started with StartPartitionRestart using the provided - OperationId. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param partition_id: The identity of the partition. - :type partition_id: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionRestartProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionRestartProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionRestartProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_restart_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PartitionRestartProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} # type: ignore - - def start_node_transition( - self, - node_name, # type: str - operation_id, # type: str - node_transition_type, # type: Union[str, "_models.NodeTransitionType"] - node_instance_id, # type: str - stop_duration_in_seconds, # type: int - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Starts or stops a cluster node. - - Starts or stops a cluster node. A cluster node is a process, not the OS instance itself. To - start a node, pass in "Start" for the NodeTransitionType parameter. - To stop a node, pass in "Stop" for the NodeTransitionType parameter. This API starts the - operation - when the API returns the node may not have finished transitioning yet. - Call GetNodeTransitionProgress with the same OperationId to get the progress of the operation. - - :param node_name: The name of the node. - :type node_name: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param node_transition_type: Indicates the type of transition to perform. - NodeTransitionType.Start will start a stopped node. NodeTransitionType.Stop will stop a node - that is up. - :type node_transition_type: str or ~azure.servicefabric.models.NodeTransitionType - :param node_instance_id: The node instance ID of the target node. This can be determined - through GetNodeInfo API. - :type node_instance_id: str - :param stop_duration_in_seconds: The duration, in seconds, to keep the node stopped. The - minimum value is 600, the maximum is 14400. After this time expires, the node will - automatically come back up. 
- :type stop_duration_in_seconds: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.start_node_transition.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['NodeTransitionType'] = self._serialize.query("node_transition_type", node_transition_type, 'str') - query_parameters['NodeInstanceId'] = self._serialize.query("node_instance_id", node_instance_id, 'str') - query_parameters['StopDurationInSeconds'] = self._serialize.query("stop_duration_in_seconds", stop_duration_in_seconds, 'int', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, 
query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} # type: ignore - - def get_node_transition_progress( - self, - node_name, # type: str - operation_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.NodeTransitionProgress" - """Gets the progress of an operation started using StartNodeTransition. - - Gets the progress of an operation started with StartNodeTransition using the provided - OperationId. - - :param node_name: The name of the node. - :type node_name: str - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: NodeTransitionProgress, or the result of cls(response) - :rtype: ~azure.servicefabric.models.NodeTransitionProgress - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.NodeTransitionProgress"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_transition_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('NodeTransitionProgress', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
{}) - - return deserialized - get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} # type: ignore - - def get_fault_operation_list( - self, - type_filter=65535, # type: int - state_filter=65535, # type: int - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.OperationStatus"] - """Gets a list of user-induced fault operations filtered by provided input. - - Gets the list of user-induced fault operations filtered by provided input. - - :param type_filter: Used to filter on OperationType for user-induced operations. - - - * 65535 - select all - * 1 - select PartitionDataLoss. - * 2 - select PartitionQuorumLoss. - * 4 - select PartitionRestart. - * 8 - select NodeTransition. - :type type_filter: int - :param state_filter: Used to filter on OperationState's for user-induced operations. - - - * 65535 - select All - * 1 - select Running - * 2 - select RollingBack - * 8 - select Completed - * 16 - select Faulted - * 32 - select Cancelled - * 64 - select ForceCancelled. - :type state_filter: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of OperationStatus, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.OperationStatus] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.OperationStatus"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_fault_operation_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['TypeFilter'] = self._serialize.query("type_filter", type_filter, 'int') - query_parameters['StateFilter'] = self._serialize.query("state_filter", state_filter, 'int') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[OperationStatus]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_fault_operation_list.metadata = {'url': 
'/Faults/'} # type: ignore - - def cancel_operation( - self, - operation_id, # type: str - force=False, # type: bool - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Cancels a user-induced fault operation. - - The following APIs start fault operations that may be cancelled by using CancelOperation: - StartDataLoss, StartQuorumLoss, StartPartitionRestart, StartNodeTransition. - - If force is false, then the specified user-induced operation will be gracefully stopped and - cleaned up. If force is true, the command will be aborted, and some internal state - may be left behind. Specifying force as true should be used with care. Calling this API with - force set to true is not allowed until this API has already - been called on the same test command with force set to false first, or unless the test command - already has an OperationState of OperationState.RollingBack. - Clarification: OperationState.RollingBack means that the system will be/is cleaning up internal - system state caused by executing the command. It will not restore data if the - test command was to cause data loss. For example, if you call StartDataLoss then call this - API, the system will only clean up internal state from running the command. - It will not restore the target partition's data, if the command progressed far enough to cause - data loss. - - Important note: if this API is invoked with force==true, internal state may be left behind. - - :param operation_id: A GUID that identifies a call of this API. This is passed into the - corresponding GetProgress API. - :type operation_id: str - :param force: Indicates whether to gracefully roll back and clean up internal system state - modified by executing the user-induced operation. - :type force: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.cancel_operation.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['OperationId'] = self._serialize.query("operation_id", operation_id, 'str') - query_parameters['Force'] = self._serialize.query("force", force, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - cancel_operation.metadata = {'url': '/Faults/$/Cancel'} # type: ignore - - def create_backup_policy( - self, - backup_policy_description, # type: "_models.BackupPolicyDescription" - timeout=60, # type: Optional[int] - 
validate_connection=False, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a backup policy. - - Creates a backup policy which can be associated later with a Service Fabric application, - service or a partition for periodic backup. - - :param backup_policy_description: Describes the backup policy. - :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param validate_connection: Specifies whether to validate the storage connection and - credentials before creating or updating the backup policies. - :type validate_connection: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_backup_policy.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if validate_connection is not None: - query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') - - # Construct headers - 
header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} # type: ignore - - def delete_backup_policy( - self, - backup_policy_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes the backup policy. - - Deletes an existing backup policy. A backup policy must be created before it can be deleted. A - currently active backup policy, associated with any Service Fabric application, service or - partition, cannot be deleted without first deleting the mapping. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_backup_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} # type: ignore - - def get_backup_policy_list( - self, - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: 
Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupPolicyDescriptionList" - """Gets all the backup policies configured. - - Get a list of all the backup policies configured. - - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupPolicyDescriptionList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupPolicyDescriptionList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_backup_policy_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = 
self._deserialize('PagedBackupPolicyDescriptionList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} # type: ignore - - def get_backup_policy_by_name( - self, - backup_policy_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.BackupPolicyDescription" - """Gets a particular backup policy by name. - - Gets a particular backup policy identified by {backupPolicyName}. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BackupPolicyDescription, or the result of cls(response) - :rtype: ~azure.servicefabric.models.BackupPolicyDescription - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicyDescription"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_backup_policy_by_name.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('BackupPolicyDescription', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} # type: ignore - - def get_all_entities_backed_up_by_policy( - self, - backup_policy_name, # type: str - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupEntityList" - """Gets the list of backup entities that are associated with this policy. - - Returns a list of Service Fabric application, service or partition which are associated with - this backup policy. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. 
If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupEntityList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupEntityList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupEntityList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_all_entities_backed_up_by_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if 
continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupEntityList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} # type: ignore - - def update_backup_policy( - self, - backup_policy_name, # type: str - backup_policy_description, # type: "_models.BackupPolicyDescription" - timeout=60, # type: Optional[int] - validate_connection=False, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates the backup policy. - - Updates the backup policy identified by {backupPolicyName}. - - :param backup_policy_name: The name of the backup policy. - :type backup_policy_name: str - :param backup_policy_description: Describes the backup policy. 
- :type backup_policy_description: ~azure.servicefabric.models.BackupPolicyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param validate_connection: Specifies whether to validate the storage connection and - credentials before creating or updating the backup policies. - :type validate_connection: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.update_backup_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if validate_connection is not None: - query_parameters['ValidateConnection'] = self._serialize.query("validate_connection", validate_connection, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} # type: ignore - - def enable_application_backup( - self, - application_id, # type: str - backup_policy_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Enables periodic backup of stateful partitions under this Service Fabric application. - - Enables periodic backup of stateful partitions which are part of this Service Fabric - application. Each partition is backed up individually as per the specified backup policy - description. - Note only C# based Reliable Actor and Reliable Stateful services are currently supported for - periodic backup. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. 
- :type application_id: str - :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.enable_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = 
self._serialize.body(_enable_backup_description, 'EnableBackupDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} # type: ignore - - def disable_application_backup( - self, - application_id, # type: str - clean_backup, # type: bool - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Disables periodic backup of Service Fabric application. - - Disables periodic backup of Service Fabric application which was previously enabled. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the - backups which were created for the backup entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.disable_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _disable_backup_description is not None: - body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} # type: ignore - - def get_application_backup_configuration_info( - self, - application_id, # type: str - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupConfigurationInfoList" - """Gets the Service Fabric application backup configuration information. - - Gets the Service Fabric backup configuration information for the application and the services - and partitions under this application. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupConfigurationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_backup_configuration_info.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = 
self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} # type: ignore - - def get_application_backup_list( - self, - application_id, # type: str - timeout=60, # type: Optional[int] - latest=False, # type: Optional[bool] - start_date_time_filter=None, # type: Optional[datetime.datetime] - end_date_time_filter=None, # type: Optional[datetime.datetime] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupInfoList" - """Gets the list of backups available for every partition in this application. - - Returns a list of backups available for every partition in this Service Fabric application. 
The - server enumerates all the backups available at the backup location configured in the backup - policy. It also allows filtering of the result based on start and end datetime or just fetching - the latest available backup for every partition. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup available for a partition - for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, all backups from the beginning are enumerated. - :type start_date_time_filter: ~datetime.datetime - :param end_date_time_filter: Specify the end date time till which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, enumeration is done till the most recent backup. - :type end_date_time_filter: ~datetime.datetime - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. 
When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_backup_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = 
self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} # type: ignore - - def suspend_application_backup( - self, - application_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Suspends periodic backup for the specified Service Fabric application. - - The application which is configured to take periodic backups, is suspended for taking further - backups till it is resumed again. 
This operation applies to the entire application's hierarchy. - It means all the services and partitions under this application are now suspended for backup. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.suspend_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} # type: ignore - - def resume_application_backup( - self, - application_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Resumes periodic backup of a Service Fabric application which was previously suspended. - - The previously suspended Service Fabric application resumes taking periodic backup as per the - backup policy currently configured for the same. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.resume_application_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_application_backup.metadata = {'url': '/Applications/{applicationId}/$/ResumeBackup'} # type: ignore - - def enable_service_backup( - self, - service_id, # type: str - backup_policy_name, # type: str - timeout=60, # type: 
Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Enables periodic backup of stateful partitions under this Service Fabric service. - - Enables periodic backup of stateful partitions which are part of this Service Fabric service. - Each partition is backed up individually as per the specified backup policy description. In - case the application, which the service is part of, is already enabled for backup then this - operation would override the policy being used to take the periodic backup for this service and - its partitions (unless explicitly overridden at the partition level). - Note only C# based Reliable Actor and Reliable Stateful services are currently supported for - periodic backup. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.enable_service_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} # type: ignore - - def disable_service_backup( - self, - service_id, # type: str - clean_backup, # type: bool - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Disables periodic backup of Service Fabric service which was previously enabled. - - Disables periodic backup of Service Fabric service which was previously enabled. Backup must be - explicitly enabled before it can be disabled. - In case the backup is enabled for the Service Fabric application, which this service is part - of, this service would continue to be periodically backed up as per the policy mapped at the - application level. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the - backups which were created for the backup entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.disable_service_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _disable_backup_description is not None: - body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code 
not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} # type: ignore - - def get_service_backup_configuration_info( - self, - service_id, # type: str - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupConfigurationInfoList" - """Gets the Service Fabric service backup configuration information. - - Gets the Service Fabric backup configuration information for the service and the partitions - under this service. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. 
- This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupConfigurationInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupConfigurationInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_backup_configuration_info.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = 
self._serialize.query("max_results", max_results, 'long', minimum=0) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupConfigurationInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_backup_configuration_info.metadata = {'url': '/Services/{serviceId}/$/GetBackupConfigurationInfo'} # type: ignore - - def get_service_backup_list( - self, - service_id, # type: str - timeout=60, # type: Optional[int] - latest=False, # type: Optional[bool] - start_date_time_filter=None, # type: Optional[datetime.datetime] - end_date_time_filter=None, # type: Optional[datetime.datetime] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupInfoList" - """Gets the list of backups available for every partition in this service. - - Returns a list of backups available for every partition in this Service Fabric service. The - server enumerates all the backups available in the backup store configured in the backup - policy. It also allows filtering of the result based on start and end datetime or just fetching - the latest available backup for every partition. 
- - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup available for a partition - for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, all backups from the beginning are enumerated. - :type start_date_time_filter: ~datetime.datetime - :param end_date_time_filter: Specify the end date time till which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, enumeration is done till the most recent backup. - :type end_date_time_filter: ~datetime.datetime - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. 
- :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_backup_list.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - 
query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'} # type: ignore - - def suspend_service_backup( - self, - service_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Suspends periodic backup for the specified Service Fabric service. - - The service which is configured to take periodic backups, is suspended for taking further - backups till it is resumed again. This operation applies to the entire service's hierarchy. It - means all the partitions under this service are now suspended for backup. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. 
- Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.suspend_service_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'} # type: ignore - - def resume_service_backup( - self, - service_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Resumes periodic backup of a Service Fabric service which was previously suspended. - - The previously suspended Service Fabric service resumes taking periodic backup as per the - backup policy currently configured for the same. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.resume_service_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'} # type: ignore - - def enable_partition_backup( - self, - partition_id, # type: str - backup_policy_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: 
Any - ): - # type: (...) -> None - """Enables periodic backup of the stateful persisted partition. - - Enables periodic backup of stateful persisted partition. Each partition is backed up as per the - specified backup policy description. In case the application or service, which is partition is - part of, is already enabled for backup then this operation would override the policy being used - to take the periodic backup of this partition. - Note only C# based Reliable Actor and Reliable Stateful services are currently supported for - periodic backup. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param backup_policy_name: Name of the backup policy to be used for enabling periodic backups. - :type backup_policy_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _enable_backup_description = _models.EnableBackupDescription(backup_policy_name=backup_policy_name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.enable_partition_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_enable_backup_description, 'EnableBackupDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'} # type: ignore - - def disable_partition_backup( - self, - partition_id, # type: str - clean_backup, # type: bool - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Disables periodic backup of Service Fabric partition which was previously enabled. - - Disables periodic backup of partition which was previously enabled. Backup must be explicitly - enabled before it can be disabled. - In case the backup is enabled for the Service Fabric application or service, which this - partition is part of, this partition would continue to be periodically backed up as per the - policy mapped at the higher level entity. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param clean_backup: Boolean flag to delete backups. It can be set to true for deleting all the - backups which were created for the backup entity that is getting disabled for backup. - :type clean_backup: bool - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _disable_backup_description = _models.DisableBackupDescription(clean_backup=clean_backup) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.disable_partition_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if _disable_backup_description is not None: - body_content = self._serialize.body(_disable_backup_description, 'DisableBackupDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - disable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/DisableBackup'} # type: ignore - - def get_partition_backup_configuration_info( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PartitionBackupConfigurationInfo" - """Gets the partition backup configuration information. - - Gets the Service Fabric Backup configuration information for the specified partition. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PartitionBackupConfigurationInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionBackupConfigurationInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_backup_configuration_info.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PartitionBackupConfigurationInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return 
deserialized - get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'} # type: ignore - - def get_partition_backup_list( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - latest=False, # type: Optional[bool] - start_date_time_filter=None, # type: Optional[datetime.datetime] - end_date_time_filter=None, # type: Optional[datetime.datetime] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupInfoList" - """Gets the list of backups available for the specified partition. - - Returns a list of backups available for the specified partition. The server enumerates all the - backups available in the backup store configured in the backup policy. It also allows filtering - of the result based on start and end datetime or just fetching the latest available backup for - the partition. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param latest: Specifies whether to get only the most recent backup available for a partition - for the specified time range. - :type latest: bool - :param start_date_time_filter: Specify the start date time from which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, all backups from the beginning are enumerated. - :type start_date_time_filter: ~datetime.datetime - :param end_date_time_filter: Specify the end date time till which to enumerate backups, in - datetime format. The date time must be specified in ISO8601 format. This is an optional - parameter. If not specified, enumeration is done till the most recent backup. 
- :type end_date_time_filter: ~datetime.datetime - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_backup_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if latest is not None: - query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') - if start_date_time_filter is not None: - query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') - if end_date_time_filter is not None: - query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_backup_list.metadata = {'url': '/Partitions/{partitionId}/$/GetBackups'} # type: ignore - - def suspend_partition_backup( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Suspends periodic backup for the specified partition. - - The partition which is configured to take periodic backups, is suspended for taking further - backups till it is resumed again. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.suspend_partition_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'} # type: ignore - - def resume_partition_backup( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: 
(...) -> None - """Resumes periodic backup of partition which was previously suspended. - - The previously suspended partition resumes taking periodic backup as per the backup policy - currently configured for the same. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.resume_partition_backup.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'} # type: ignore - - def backup_partition( - self, - partition_id, # type: str - backup_timeout=10, # type: Optional[int] - timeout=60, # type: Optional[int] - backup_storage=None, # type: Optional["_models.BackupStorageDescription"] - **kwargs # type: Any - ): - # type: (...) -> None - """Triggers backup of the partition's state. - - Creates a backup of the stateful persisted partition's state. In case the partition is already - being periodically backed up, then by default the new backup is created at the same backup - storage. One can also override the same by specifying the backup storage details as part of the - request body. Once the backup is initiated, its progress can be tracked using the - GetBackupProgress operation. - In case, the operation times out, specify a greater backup timeout value in the query - parameter. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param backup_timeout: Specifies the maximum amount of time, in minutes, to wait for the backup - operation to complete. Post that, the operation completes with timeout error. However, in - certain corner cases it could be that though the operation returns back timeout, the backup - actually goes through. In case of timeout error, its recommended to invoke this operation again - with a greater timeout value. The default value for the same is 10 minutes. - :type backup_timeout: int - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param backup_storage: Specifies the details of the backup storage where to save the backup. - :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _backup_partition_description = _models.BackupPartitionDescription(backup_storage=backup_storage) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.backup_partition.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if backup_timeout is not None: - query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: 
Dict[str, Any] - if _backup_partition_description is not None: - body_content = self._serialize.body(_backup_partition_description, 'BackupPartitionDescription') - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'} # type: ignore - - def get_partition_backup_progress( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.BackupProgressInfo" - """Gets details for the latest backup triggered for this partition. - - Returns information about the state of the latest backup along with details or failure reason - in case of completion. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BackupProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.BackupProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_backup_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('BackupProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_backup_progress.metadata = {'url': 
'/Partitions/{partitionId}/$/GetBackupProgress'} # type: ignore - - def restore_partition( - self, - partition_id, # type: str - restore_partition_description, # type: "_models.RestorePartitionDescription" - restore_timeout=10, # type: Optional[int] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Triggers restore of the state of the partition using the specified restore partition description. - - Restores the state of a of the stateful persisted partition using the specified backup point. - In case the partition is already being periodically backed up, then by default the backup point - is looked for in the storage specified in backup policy. One can also override the same by - specifying the backup storage details as part of the restore partition description in body. - Once the restore is initiated, its progress can be tracked using the GetRestoreProgress - operation. - In case, the operation times out, specify a greater restore timeout value in the query - parameter. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param restore_partition_description: Describes the parameters to restore the partition. - :type restore_partition_description: ~azure.servicefabric.models.RestorePartitionDescription - :param restore_timeout: Specifies the maximum amount of time to wait, in minutes, for the - restore operation to complete. Post that, the operation returns back with timeout error. - However, in certain corner cases it could be that the restore operation goes through even - though it completes with timeout. In case of timeout error, its recommended to invoke this - operation again with a greater timeout value. the default value for the same is 10 minutes. - :type restore_timeout: int - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. 
The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.restore_partition.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if restore_timeout is not None: - query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - 
if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'} # type: ignore - - def get_partition_restore_progress( - self, - partition_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.RestoreProgressInfo" - """Gets details for the latest restore operation triggered for this partition. - - Returns information about the state of the latest restore operation along with details or - failure reason in case of completion. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: RestoreProgressInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.RestoreProgressInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.RestoreProgressInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_restore_progress.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('RestoreProgressInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_restore_progress.metadata = {'url': 
'/Partitions/{partitionId}/$/GetRestoreProgress'} # type: ignore - - def get_backups_from_backup_location( - self, - get_backup_by_storage_query_description, # type: "_models.GetBackupByStorageQueryDescription" - timeout=60, # type: Optional[int] - continuation_token_parameter=None, # type: Optional[str] - max_results=0, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedBackupInfoList" - """Gets the list of backups available for the specified backed up entity at the specified backup location. - - Gets the list of backups available for the specified backed up entity (Application, Service or - Partition) at the specified backup location (FileShare or Azure Blob Storage). - - :param get_backup_by_storage_query_description: Describes the filters and backup storage - details to be used for enumerating backups. - :type get_backup_by_storage_query_description: ~azure.servicefabric.models.GetBackupByStorageQueryDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param max_results: The maximum number of results to be returned as part of the paged queries. - This parameter defines the upper bound on the number of results returned. 
The results returned - can be less than the specified maximum results if they do not fit in the message as per the max - message size restrictions defined in the configuration. If this parameter is zero or not - specified, the paged query includes as many results as possible that fit in the return message. - :type max_results: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedBackupInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedBackupInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedBackupInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.get_backups_from_backup_location.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if max_results is not None: - query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - 
body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedBackupInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'} # type: ignore - - def create_name( - self, - name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a Service Fabric name. - - Creates the specified Service Fabric name. - - :param name: The Service Fabric name, including the 'fabric:' URI scheme. - :type name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _name_description = _models.NameDescription(name=name) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.create_name.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_name_description, 'NameDescription') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - create_name.metadata = 
{'url': '/Names/$/Create'} # type: ignore - - def get_name_exists_info( - self, - name_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns whether the Service Fabric name exists. - - Returns whether the specified Service Fabric name exists. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_name_exists_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - get_name_exists_info.metadata = {'url': '/Names/{nameId}'} # type: ignore - - def delete_name( - self, - name_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes a Service Fabric name. - - Deletes the specified Service Fabric name. A name must be created before it can be deleted. - Deleting a name with child properties will fail. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_name.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_name.metadata = {'url': '/Names/{nameId}'} # type: ignore - - def get_sub_name_info_list( - self, - name_id, # type: str - recursive=False, # type: Optional[bool] - continuation_token_parameter=None, # type: Optional[str] - timeout=60, # type: Optional[int] - 
**kwargs # type: Any - ): - # type: (...) -> "_models.PagedSubNameInfoList" - """Enumerates all the Service Fabric names under a given name. - - Enumerates all the Service Fabric names under a given name. If the subnames do not fit in a - page, one page of results is returned as well as a continuation token, which can be used to get - the next page. Querying a name that doesn't exist will fail. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param recursive: Allows specifying that the search performed should be recursive. - :type recursive: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedSubNameInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedSubNameInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedSubNameInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_sub_name_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if recursive is not None: - query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedSubNameInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'} # type: ignore - - def get_property_info_list( - self, - name_id, # type: str - include_values=False, # type: Optional[bool] - continuation_token_parameter=None, # type: Optional[str] - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PagedPropertyInfoList" - """Gets information on all Service Fabric properties under a given name. - - A Service Fabric name can have one or more named properties that store custom information. This - operation gets the information about these properties in a paged list. The information includes - name, value, and metadata about each of the properties. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param include_values: Allows specifying whether to include the values of the properties - returned. True if values should be returned with the metadata; False to return only property - metadata. - :type include_values: bool - :param continuation_token_parameter: The continuation token parameter is used to obtain next - set of results. A continuation token with a non-empty value is included in the response of the - API when the results from the system do not fit in a single response. When this value is passed - to the next API call, the API returns next set of results. If there are no further results, - then the continuation token does not contain a value. The value of this parameter should not be - URL encoded. - :type continuation_token_parameter: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PagedPropertyInfoList, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PagedPropertyInfoList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedPropertyInfoList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_property_info_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if include_values is not None: - query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool') - if continuation_token_parameter is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token_parameter", continuation_token_parameter, 'str', skip_quote=True) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PagedPropertyInfoList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'} # type: ignore - - def put_property( - self, - name_id, # type: str - property_description, # type: "_models.PropertyDescription" - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates or updates a Service Fabric property. - - Creates or updates the specified Service Fabric property under a given name. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param property_description: Describes the Service Fabric property to be created. - :type property_description: ~azure.servicefabric.models.PropertyDescription - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.put_property.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(property_description, 'PropertyDescription') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise 
HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore - - def get_property_info( - self, - name_id, # type: str - property_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.PropertyInfo" - """Gets the specified Service Fabric property. - - Gets the specified Service Fabric property under a given name. This will always return both - value and metadata. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PropertyInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.PropertyInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PropertyInfo"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_property_info.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - 
query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('PropertyInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore - - def delete_property( - self, - name_id, # type: str - property_name, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes the specified Service Fabric property. - - Deletes the specified Service Fabric property under a given name. A property must be created - before it can be deleted. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.delete_property.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) - - delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} # type: ignore - - def submit_property_batch( - self, - name_id, # type: str - timeout=60, # type: 
Optional[int] - operations=None, # type: Optional[List["_models.PropertyBatchOperation"]] - **kwargs # type: Any - ): - # type: (...) -> Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"] - """Submits a property batch. - - Submits a batch of property operations. Either all or none of the operations will be committed. - - :param name_id: The Service Fabric name, without the 'fabric:' URI scheme. - :type name_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param operations: A list of the property batch operations to be executed. - :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SuccessfulPropertyBatchInfo or FailedPropertyBatchInfo, or the result of cls(response) - :rtype: ~azure.servicefabric.models.SuccessfulPropertyBatchInfo or ~azure.servicefabric.models.FailedPropertyBatchInfo - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.SuccessfulPropertyBatchInfo", "_models.FailedPropertyBatchInfo"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _property_batch_description_list = _models.PropertyBatchDescriptionList(operations=operations) - api_version = "8.0" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.submit_property_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) 
- - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(_property_batch_description_list, 'PropertyBatchDescriptionList') - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 409]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - if response.status_code == 200: - deserialized = self._deserialize('SuccessfulPropertyBatchInfo', pipeline_response) - - if response.status_code == 409: - deserialized = self._deserialize('FailedPropertyBatchInfo', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'} # type: ignore - - def get_cluster_event_list( - self, - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: 
Any - ): - # type: (...) -> List["_models.ClusterEvent"] - """Gets all Cluster-related events. - - The response is list of ClusterEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ClusterEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ClusterEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ClusterEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_cluster_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ClusterEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_cluster_event_list.metadata = {'url': '/EventsStore/Cluster/Events'} # type: ignore - - def get_containers_event_list( - self, - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ContainerInstanceEvent"] - """Gets all Containers-related events. - - The response is list of ContainerInstanceEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. 
otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ContainerInstanceEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ContainerInstanceEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_containers_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ContainerInstanceEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'} # type: ignore - - def get_node_event_list( - self, - node_name, # type: str - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.NodeEvent"] - """Gets a Node-related events. - - The response is list of NodeEvent objects. - - :param node_name: The name of the node. - :type node_name: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. 
- :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of NodeEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.NodeEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_node_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'nodeName': self._serialize.url("node_name", node_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = 
self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[NodeEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'} # type: ignore - - def get_nodes_event_list( - self, - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.NodeEvent"] - """Gets all Nodes-related Events. - - The response is list of NodeEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. 
This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of NodeEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.NodeEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.NodeEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_nodes_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - 
query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[NodeEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_nodes_event_list.metadata = {'url': '/EventsStore/Nodes/Events'} # type: ignore - - def get_application_event_list( - self, - application_id, # type: str - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ApplicationEvent"] - """Gets an Application-related events. - - The response is list of ApplicationEvent objects. - - :param application_id: The identity of the application. This is typically the full name of the - application without the 'fabric:' URI scheme. 
- Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the application name is "fabric:/myapp/app1", the application identity would - be "myapp~app1" in 6.0+ and "myapp/app1" in previous versions. - :type application_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ApplicationEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ApplicationEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_application_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'} # type: ignore - - def get_applications_event_list( - self, - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ApplicationEvent"] - """Gets all Applications-related events. - - The response is list of ApplicationEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. 
- :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ApplicationEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ApplicationEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_applications_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ApplicationEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'} # type: ignore - - def get_service_event_list( - self, - service_id, # type: str - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ServiceEvent"] - """Gets a Service-related events. - - The response is list of ServiceEvent objects. - - :param service_id: The identity of the service. This ID is typically the full name of the - service without the 'fabric:' URI scheme. - Starting from version 6.0, hierarchical names are delimited with the "~" character. - For example, if the service name is "fabric:/myapp/app1/svc1", the service identity would be - "myapp~app1~svc1" in 6.0+ and "myapp/app1/svc1" in previous versions. - :type service_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
- :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ServiceEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ServiceEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_service_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') 
- if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ServiceEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'} # type: ignore - - def get_services_event_list( - self, - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: 
Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ServiceEvent"] - """Gets all Services-related events. - - The response is list of ServiceEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ServiceEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ServiceEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_services_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ServiceEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} # type: ignore - - def get_partition_event_list( - self, - partition_id, # type: str - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.PartitionEvent"] - """Gets a Partition-related events. - - The response is list of PartitionEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. 
- :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of PartitionEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.PartitionEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is 
not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[PartitionEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} # type: ignore - - def get_partitions_event_list( - self, - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.PartitionEvent"] - """Gets all Partitions-related events. - - The response is list of PartitionEvent objects. - - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of PartitionEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.PartitionEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PartitionEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partitions_event_list.metadata['url'] # type: ignore - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - 
query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[PartitionEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} # type: ignore - - def get_partition_replica_event_list( - self, - partition_id, # type: str - replica_id, # type: str - start_time_utc, # type: str - end_time_utc, # type: str - timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ReplicaEvent"] - """Gets a Partition Replica-related events. - - The response is list of ReplicaEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param replica_id: The identifier of the replica. - :type replica_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. 
- :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. - :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ReplicaEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ReplicaEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_replica_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: 
Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} # type: ignore - - def get_partition_replicas_event_list( - self, - partition_id, # type: str - start_time_utc, # type: str - end_time_utc, # type: str - 
timeout=60, # type: Optional[int] - events_types_filter=None, # type: Optional[str] - exclude_analysis_events=None, # type: Optional[bool] - skip_correlation_lookup=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> List["_models.ReplicaEvent"] - """Gets all Replicas-related events for a Partition. - - The response is list of ReplicaEvent objects. - - :param partition_id: The identity of the partition. - :type partition_id: str - :param start_time_utc: The start time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type start_time_utc: str - :param end_time_utc: The end time of a lookup query in ISO UTC yyyy-MM-ddTHH:mm:ssZ. - :type end_time_utc: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. - :type timeout: long - :param events_types_filter: This is a comma separated string specifying the types of - FabricEvents that should only be included in the response. - :type events_types_filter: str - :param exclude_analysis_events: This param disables the retrieval of AnalysisEvents if true is - passed. - :type exclude_analysis_events: bool - :param skip_correlation_lookup: This param disables the search of CorrelatedEvents information - if true is passed. otherwise the CorrelationEvents get processed and HasCorrelatedEvents field - in every FabricEvent gets populated. 
- :type skip_correlation_lookup: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of ReplicaEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.ReplicaEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ReplicaEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_partition_replicas_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') - query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') - if events_types_filter is not None: - query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') - if exclude_analysis_events is not None: - query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') - if skip_correlation_lookup is not None: - query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[ReplicaEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} # type: ignore - - def get_correlated_event_list( - self, - event_instance_id, # type: str - timeout=60, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List["_models.FabricEvent"] - """Gets all correlated events for a given event. - - The response is list of FabricEvents. - - :param event_instance_id: The EventInstanceId. - :type event_instance_id: str - :param timeout: The server timeout for performing the operation in seconds. This timeout - specifies the time duration that the client is willing to wait for the requested operation to - complete. The default value for this parameter is 60 seconds. 
- :type timeout: long - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of FabricEvent, or the result of cls(response) - :rtype: list[~azure.servicefabric.models.FabricEvent] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FabricEvent"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "8.0" - accept = "application/json" - - # Construct URL - url = self.get_correlated_event_list.metadata['url'] # type: ignore - path_format_arguments = { - 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.FabricError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('[FabricEvent]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_correlated_event_list.metadata = {'url': 
'/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} # type: ignore diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed deleted file mode 100644 index e5aff4f83af8..000000000000 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_version.py b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/version.py similarity index 88% rename from sdk/servicefabric/azure-servicefabric/azure/servicefabric/_version.py rename to sdk/servicefabric/azure-servicefabric/azure/servicefabric/version.py index c268602f0728..dd2553c62906 100644 --- a/sdk/servicefabric/azure-servicefabric/azure/servicefabric/_version.py +++ b/sdk/servicefabric/azure-servicefabric/azure/servicefabric/version.py @@ -1,9 +1,13 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. # -------------------------------------------------------------------------- VERSION = "8.0.0.0" + From c908812c7d556b222094ef9be57885bbcfcf3f14 Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Mon, 3 May 2021 11:45:35 -0700 Subject: [PATCH 5/6] Updated changelog. 
--- .../azure-servicefabric/CHANGELOG.md | 282 ++---------------- 1 file changed, 18 insertions(+), 264 deletions(-) diff --git a/sdk/servicefabric/azure-servicefabric/CHANGELOG.md b/sdk/servicefabric/azure-servicefabric/CHANGELOG.md index 728b99540408..0292be2871dd 100644 --- a/sdk/servicefabric/azure-servicefabric/CHANGELOG.md +++ b/sdk/servicefabric/azure-servicefabric/CHANGELOG.md @@ -4,286 +4,40 @@ **Features** - - Model ServiceDescription has a new parameter tags_required_to_run - - Model ServiceDescription has a new parameter tags_required_to_place - - Model NodeInfo has a new parameter node_tags - - Model StatefulServiceDescription has a new parameter tags_required_to_run - - Model StatefulServiceDescription has a new parameter tags_required_to_place - - Model StatefulServiceDescription has a new parameter replica_lifecycle_description - - Model ApplicationInfo has a new parameter managed_application_identity - - Model ClusterHealthPolicy has a new parameter node_type_health_policy_map - - Model StatelessServiceDescription has a new parameter instance_lifecycle_description - - Model StatelessServiceDescription has a new parameter tags_required_to_run - - Model StatelessServiceDescription has a new parameter tags_required_to_place - - Model StatelessServiceDescription has a new parameter instance_restart_wait_duration_seconds - Model ApplicationUpgradeDescription has a new parameter managed_application_identity - - Model StatelessServiceUpdateDescription has a new parameter tags_for_placement - - Model StatelessServiceUpdateDescription has a new parameter service_dns_name - - Model StatelessServiceUpdateDescription has a new parameter instance_restart_wait_duration_seconds - Model StatelessServiceUpdateDescription has a new parameter instance_lifecycle_description - Model StatelessServiceUpdateDescription has a new parameter tags_for_running + - Model StatelessServiceUpdateDescription has a new parameter service_dns_name + - Model 
StatelessServiceUpdateDescription has a new parameter tags_for_placement + - Model StatelessServiceUpdateDescription has a new parameter instance_restart_wait_duration_seconds + - Model ServiceDescription has a new parameter tags_required_to_place + - Model ServiceDescription has a new parameter tags_required_to_run - Model ServiceUpdateDescription has a new parameter tags_for_placement - Model ServiceUpdateDescription has a new parameter tags_for_running - Model ServiceUpdateDescription has a new parameter service_dns_name + - Model StatefulServiceDescription has a new parameter replica_lifecycle_description + - Model StatefulServiceDescription has a new parameter tags_required_to_place + - Model StatefulServiceDescription has a new parameter tags_required_to_run + - Model StatelessServiceDescription has a new parameter instance_lifecycle_description + - Model StatelessServiceDescription has a new parameter instance_restart_wait_duration_seconds + - Model StatelessServiceDescription has a new parameter tags_required_to_place + - Model StatelessServiceDescription has a new parameter tags_required_to_run + - Model ClusterHealthPolicy has a new parameter node_type_health_policy_map + - Model NodeInfo has a new parameter node_tags - Model StatefulServiceUpdateDescription has a new parameter replica_lifecycle_description - Model StatefulServiceUpdateDescription has a new parameter tags_for_placement - Model StatefulServiceUpdateDescription has a new parameter tags_for_running - Model StatefulServiceUpdateDescription has a new parameter service_dns_name + - Model ApplicationInfo has a new parameter managed_application_identity - Added operation ServiceFabricClientAPIsOperationsMixin.add_node_tags - - Added operation ServiceFabricClientAPIsOperationsMixin.get_loaded_partition_info_list - - Added operation ServiceFabricClientAPIsOperationsMixin.remove_node_tags - Added operation ServiceFabricClientAPIsOperationsMixin.move_instance + - Added operation 
ServiceFabricClientAPIsOperationsMixin.remove_node_tags + - Added operation ServiceFabricClientAPIsOperationsMixin.get_loaded_partition_info_list **Breaking changes** - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_backup_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_backup_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list_by_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_health_using_policy has a new signature - - Operation MeshApplicationOperations.create_or_update has a new signature - - Operation MeshApplicationOperations.delete has a new signature - - Operation MeshApplicationOperations.get has a new signature - - Operation MeshApplicationOperations.get_upgrade_progress has a new signature - - Operation MeshCodePackageOperations.get_container_logs has a new signature - - Operation MeshGatewayOperations.create_or_update has a new signature - - Operation MeshGatewayOperations.delete has a new signature - - Operation MeshGatewayOperations.get has a new signature - - Operation MeshNetworkOperations.create_or_update has a new signature - - Operation MeshNetworkOperations.delete has a new signature - - Operation MeshNetworkOperations.get has a new signature - - Operation MeshSecretOperations.create_or_update has a new signature - - Operation MeshSecretOperations.delete has a new signature - - Operation MeshSecretOperations.get has a new signature - - Operation 
MeshSecretValueOperations.add_value has a new signature - - Operation MeshSecretValueOperations.delete has a new signature - - Operation MeshSecretValueOperations.get has a new signature - - Operation MeshSecretValueOperations.list has a new signature - - Operation MeshSecretValueOperations.show has a new signature - - Operation MeshServiceOperations.get has a new signature - - Operation MeshServiceOperations.list has a new signature - - Operation MeshServiceReplicaOperations.get has a new signature - - Operation MeshServiceReplicaOperations.list has a new signature - - Operation MeshVolumeOperations.create_or_update has a new signature - - Operation MeshVolumeOperations.delete has a new signature - - Operation MeshVolumeOperations.get has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.add_configuration_parameter_overrides has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.backup_partition has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.cancel_operation has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.cancel_repair_task has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.commit_image_store_upload_session has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.copy_image_store_content has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.create_application has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.create_compose_deployment has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.create_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.create_repair_task has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.create_service has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.create_service_from_template has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_application has a new 
signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_backup_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_image_store_content has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_image_store_upload_session has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_property has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_repair_task has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.delete_service has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.deploy_service_package_to_node has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.disable_application_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.disable_node has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.disable_partition_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.disable_service_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.enable_application_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.enable_node has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.enable_partition_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.enable_service_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.force_approve_repair_task has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_aad_metadata has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_backup_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_health has a 
new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_load_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_manifest has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_name_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_type_info_list_by_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_applications_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_backup_policy_by_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_chaos has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_chaos_schedule has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_configuration has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_configuration_upgrade_status has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health_chunk has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_load has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_manifest has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.get_cluster_upgrade_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_version has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_compose_deployment_status has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_compose_deployment_upgrade_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_configuration_overrides has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_container_logs_deployed_on_node has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_containers_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_correlated_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_data_loss_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_code_package_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_info_list_by_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_replica_detail_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_replica_detail_info_by_partition_id has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_replica_info_list has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.get_deployed_service_type_info_by_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_type_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_fault_operation_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_content has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_folder_size has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_root_content has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_root_folder_size has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_upload_session_by_id has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_image_store_upload_session_by_path has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_name_exists_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_load_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_transition_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_nodes_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_backup_configuration_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_backup_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_backup_progress has a new signature - - 
Operation ServiceFabricClientAPIsOperationsMixin.get_partition_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_load_information has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_replica_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_replicas_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_restart_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_restore_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partitions_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_property_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_provisioned_fabric_code_version_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_provisioned_fabric_config_version_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_quorum_loss_progress has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_repair_task_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_backup_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_description has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_health has a new signature - - 
Operation ServiceFabricClientAPIsOperationsMixin.get_service_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_manifest has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_name_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_type_info_by_name has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_type_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_services_event_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_unplaced_replica_information has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_upgrade_orchestration_service_state has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.invoke_container_api has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.invoke_infrastructure_command has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.invoke_infrastructure_query has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.move_primary_replica has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.move_secondary_replica has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.post_chaos_schedule has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.provision_application_type has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.provision_cluster has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.put_property has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.recover_all_partitions has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.recover_partition has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.recover_service_partitions has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.recover_system_partitions has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.remove_compose_deployment has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.remove_configuration_overrides has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.remove_node_state has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.remove_replica has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_application_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_cluster_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_deployed_application_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_deployed_service_package_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_node_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_partition_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_replica_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.report_service_health has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.reset_partition_load has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.resolve_service has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.restart_deployed_code_package has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.restart_replica has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.restore_partition has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.resume_application_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.resume_application_upgrade has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.resume_cluster_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.resume_partition_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.resume_service_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.rollback_application_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.rollback_cluster_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.set_upgrade_orchestration_service_state has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_application_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_chaos has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_cluster_configuration_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_cluster_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_compose_deployment_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_data_loss has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_node_transition has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_partition_restart has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_quorum_loss has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.start_rollback_compose_deployment_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.stop_chaos has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.submit_property_batch has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.suspend_application_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.suspend_partition_backup has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.suspend_service_backup has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.toggle_verbose_service_placement_health_reporting has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.unprovision_application_type has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.unprovision_cluster has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_application_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_cluster_upgrade has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_repair_execution_state has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_repair_task_health_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_service has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.upload_file has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.upload_file_chunk has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_service_package_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_health_using_policy has a new signature - - Operation MeshApplicationOperations.list has a new signature - - Operation MeshSecretOperations.list has a new signature - - Operation MeshNetworkOperations.list has a new signature - - Operation MeshVolumeOperations.list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_sub_name_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_backup_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_backups_from_backup_location has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.get_service_backup_configuration_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.restart_node has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_application_backup_configuration_info has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_deployed_application_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_backup_policy_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_all_entities_backed_up_by_policy has a new signature - Operation ServiceFabricClientAPIsOperationsMixin.create_backup_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_cluster_health_chunk_using_policy_and_advanced_filters has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_property_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_health_using_policy has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_compose_deployment_status_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_replica_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_service_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_partition_info_list has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.update_partition_load has a new signature - - Operation ServiceFabricClientAPIsOperationsMixin.get_node_health_using_policy has a new signature - - Operation 
ServiceFabricClientAPIsOperationsMixin.get_chaos_events has a new signature - - Operation MeshGatewayOperations.list has a new signature + - Operation ServiceFabricClientAPIsOperationsMixin.update_backup_policy has a new signature - Model AverageServiceLoadScalingTrigger has a new required parameter use_only_primary_load ## 7.2.0.46 (2020-10-29) From ce48fb11c8fa0e4589959673a35f2e2743bcea6c Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Mon, 3 May 2021 12:19:08 -0700 Subject: [PATCH 6/6] Update shared requirements. --- shared_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/shared_requirements.txt b/shared_requirements.txt index b5df03e3402d..4a7fb37fdfc4 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -174,6 +174,7 @@ opentelemetry-sdk<2.0.0,>=1.0.0 #override azure-servicebus uamqp>=1.3.0,<2.0.0 #override azure-servicebus msrest>=0.6.17,<2.0.0 #override azure-servicebus azure-core<2.0.0,>=1.13.0 +#override azure-servicefabric msrest>=0.6.21 #override azure-search-documents msrest>=0.6.21 #override azure-synapse-accesscontrol azure-core>=1.6.0,<2.0.0 #override azure-synapse-spark azure-core>=1.6.0,<2.0.0